diff --git a/defaults/defaults.go b/defaults/defaults.go
index 11bf9fbc0c..3bdd23fe23 100644
--- a/defaults/defaults.go
+++ b/defaults/defaults.go
@@ -18,6 +18,9 @@ const (
 
 	CASecretCertName     = "ca.crt"
 	EncryptionSecretName = "cilium-ipsec-keys"
+	AKZSecretName        = "cilium-azure"
+
+	NodeInitDaemonSetName = "cilium-node-init"
 
 	OperatorServiceAccountName = "cilium-operator"
 	OperatorClusterRoleName    = "cilium-operator"
@@ -92,12 +95,6 @@ const (
 )
 
 var (
-	// OperatorLabels are the labels set on the Cilium operator by default.
-	OperatorLabels = map[string]string{
-		"io.cilium/app": "operator",
-		"name":          "cilium-operator",
-	}
-
 	// RelayDeploymentLabels are the labels set on the Hubble Relay Deployment by default.
 	RelayDeploymentLabels = map[string]string{
 		"k8s-app": "hubble-relay",
diff --git a/go.mod b/go.mod
index eb6ec3742e..7fdaecb421 100644
--- a/go.mod
+++ b/go.mod
@@ -12,6 +12,7 @@ replace (
 
 require (
 	github.com/blang/semver/v4 v4.0.0
+	github.com/cilium/charts v0.0.0-20220221222012-aeec68db76ca
 	github.com/cilium/cilium v1.11.1
 	github.com/cilium/hubble v0.9.0
 	github.com/cilium/workerpool v1.1.1
@@ -23,21 +24,30 @@
 	github.com/spf13/cobra v1.3.0
 	google.golang.org/grpc v1.44.0
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c
+	helm.sh/helm/v3 v3.8.0
 	k8s.io/api v0.24.0-alpha.0
 	k8s.io/apimachinery v0.24.0-alpha.0
 	k8s.io/cli-runtime v0.24.0-alpha.0
 	k8s.io/client-go v0.24.0-alpha.0
 	k8s.io/klog/v2 v2.30.0
+	sigs.k8s.io/yaml v1.3.0
 )
 
 require (
 	cloud.google.com/go v0.99.0 // indirect
+	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
 	github.com/Azure/go-autorest/autorest v0.11.22 // indirect
 	github.com/Azure/go-autorest/autorest/adal v0.9.17 // indirect
 	github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
+	github.com/BurntSushi/toml v0.4.1 // indirect
+	github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd // indirect
+	github.com/Masterminds/goutils v1.1.1 // indirect
+	github.com/Masterminds/semver/v3 v3.1.1 // indirect
+	github.com/Masterminds/sprig/v3 v3.2.2 // indirect
+	github.com/Masterminds/squirrel v1.5.2 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/StackExchange/wmi v1.2.1 // indirect
@@ -47,17 +57,28 @@
 	github.com/bgentry/speakeasy v0.1.0 // indirect
 	github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 // indirect
 	github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
 	github.com/cncf/xds/go v0.0.0-20211130200136-a8f946100490 // indirect
+	github.com/containerd/containerd v1.5.9 // indirect
 	github.com/coreos/go-semver v0.3.0 // indirect
 	github.com/coreos/go-systemd/v22 v22.3.2 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.1 // indirect
+	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
+	github.com/docker/cli v20.10.11+incompatible // indirect
+	github.com/docker/distribution v2.7.1+incompatible // indirect
+	github.com/docker/docker v20.10.12+incompatible // indirect
+	github.com/docker/docker-credential-helpers v0.6.4 // indirect
+	github.com/docker/go-connections v0.4.0 // indirect
+	github.com/docker/go-metrics v0.0.1 // indirect
+	github.com/docker/go-units v0.4.0 // indirect
 	github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
 	github.com/envoyproxy/go-control-plane v0.10.1 // indirect
 	github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
 	github.com/evanphx/json-patch v5.6.0+incompatible // indirect
+	github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d // indirect
 	github.com/fatih/color v1.13.0 // indirect
 	github.com/form3tech-oss/jwt-go v3.2.3+incompatible // indirect
 	github.com/fsnotify/fsnotify v1.5.1 // indirect
@@ -75,6 +96,7 @@ require (
 	github.com/go-openapi/swag v0.19.15 // indirect
 	github.com/go-openapi/validate v0.20.3 // indirect
 	github.com/go-stack/stack v1.8.1 // indirect
+	github.com/gobwas/glob v0.2.3 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang-jwt/jwt/v4 v4.1.0 // indirect
 	github.com/golang/glog v1.0.0 // indirect
@@ -89,16 +111,19 @@
 	github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
 	github.com/google/uuid v1.3.0 // indirect
 	github.com/googleapis/gnostic v0.5.5 // indirect
+	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/gorilla/websocket v1.4.2 // indirect
+	github.com/gosuri/uitable v0.0.4 // indirect
 	github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
 	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
 	github.com/hashicorp/hcl v1.0.0 // indirect
+	github.com/huandu/xstrings v1.3.2 // indirect
 	github.com/imdario/mergo v0.3.12 // indirect
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/jhump/protoreflect v1.8.2 // indirect
-	github.com/jmoiron/sqlx v1.3.3 // indirect
+	github.com/jmoiron/sqlx v1.3.4 // indirect
 	github.com/jonboulle/clockwork v0.2.2 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
@@ -106,6 +131,9 @@
 	github.com/klauspost/pgzip v1.2.5 // indirect
 	github.com/kr/pretty v0.3.0 // indirect
 	github.com/kr/text v0.2.0 // indirect
+	github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect
+	github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect
+	github.com/lib/pq v1.10.4 // indirect
 	github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
 	github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
 	github.com/magiconair/properties v1.8.5 // indirect
@@ -114,14 +142,22 @@
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/mattn/go-runewidth v0.0.12 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
+	github.com/mitchellh/copystructure v1.2.0 // indirect
+	github.com/mitchellh/go-wordwrap v1.0.0 // indirect
 	github.com/mitchellh/mapstructure v1.4.3 // indirect
+	github.com/mitchellh/reflectwalk v1.0.2 // indirect
+	github.com/moby/locker v1.0.1 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
+	github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
+	github.com/morikuni/aec v1.0.0 // indirect
 	github.com/nwaples/rardecode v1.1.0 // indirect
 	github.com/oklog/ulid v1.3.1 // indirect
 	github.com/olekukonko/tablewriter v0.0.5 // indirect
+	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/opencontainers/image-spec v1.0.2 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pelletier/go-toml v1.9.4 // indirect
 	github.com/peterbourgon/diskv v2.0.1+incompatible // indirect
@@ -135,9 +171,12 @@
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/rivo/uniseg v0.2.0 // indirect
 	github.com/rogpeppe/go-internal v1.8.0 // indirect
+	github.com/rubenv/sql-migrate v0.0.0-20210614095031-55d5740dbbcc // indirect
+	github.com/russross/blackfriday v1.5.2 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	github.com/sasha-s/go-deadlock v0.3.1 // indirect
 	github.com/shirou/gopsutil/v3 v3.21.10 // indirect
+	github.com/shopspring/decimal v1.2.0 // indirect
 	github.com/sirupsen/logrus v1.8.1 // indirect
 	github.com/soheilhy/cmux v0.1.5 // indirect
 	github.com/spf13/afero v1.6.0 // indirect
@@ -151,6 +190,9 @@
 	github.com/ulikunitz/xz v0.5.10 // indirect
 	github.com/urfave/cli v1.22.5 // indirect
 	github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b // indirect
+	github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
+	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
+	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
 	github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 // indirect
 	github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 // indirect
 	github.com/xlab/treeprint v1.1.0 // indirect
@@ -183,31 +225,36 @@
 	go.uber.org/atomic v1.9.0 // indirect
 	go.uber.org/multierr v1.7.0 // indirect
 	go.uber.org/zap v1.19.1 // indirect
-	golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
+	golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 // indirect
 	golang.org/x/mod v0.5.0 // indirect
-	golang.org/x/net v0.0.0-20211101193420-4a448f8816b3 // indirect
+	golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d // indirect
 	golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-	golang.org/x/sys v0.0.0-20211205182925-97ca703d548d // indirect
+	golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e // indirect
 	golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 // indirect
 	golang.org/x/text v0.3.7 // indirect
 	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
 	golang.org/x/tools v0.1.7 // indirect
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
+	google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 // indirect
 	google.golang.org/protobuf v1.27.1 // indirect
 	gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect
+	gopkg.in/gorp.v1 v1.7.2 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/ini.v1 v1.66.2 // indirect
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
+	k8s.io/apiextensions-apiserver v0.23.1 // indirect
+	k8s.io/apiserver v0.23.1 // indirect
+	k8s.io/component-base v0.23.1 // indirect
 	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+	k8s.io/kubectl v0.23.1 // indirect
 	k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b // indirect
+	oras.land/oras-go v1.1.0 // indirect
 	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
 	sigs.k8s.io/kustomize/api v0.10.1 // indirect
 	sigs.k8s.io/kustomize/kyaml v0.13.0 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.0 // indirect
-	sigs.k8s.io/yaml v1.3.0 // indirect
 )
diff --git a/go.sum b/go.sum
index 20dedf6800..37704b9a2d 100644
--- a/go.sum
+++ b/go.sum
@@ -68,11 +68,13 @@ github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiU
 github.com/Azure/azure-sdk-for-go v16.2.1+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
+github.com/Azure/azure-sdk-for-go v56.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-sdk-for-go v59.3.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc=
 github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0=
 github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0=
 github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
 github.com/Azure/go-ansiterm v0.0.0-20210608223527-2377c96fe795/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
 github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
 github.com/Azure/go-autorest v10.8.1+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
 github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
@@ -82,12 +84,14 @@ github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKn
 github.com/Azure/go-autorest/autorest v0.11.12/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw=
 github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
 github.com/Azure/go-autorest/autorest v0.11.19/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA=
+github.com/Azure/go-autorest/autorest v0.11.20/go.mod h1:o3tqFY+QR40VOlk+pV4d77mORO64jOXSgEnPQgLK6JY=
 github.com/Azure/go-autorest/autorest v0.11.22 h1:bXiQwDjrRmBQOE67bwlvUKAC1EU1yZTPQ38c+bstZws=
 github.com/Azure/go-autorest/autorest v0.11.22/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs=
 github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
 github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
 github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
 github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M=
+github.com/Azure/go-autorest/autorest/adal v0.9.15/go.mod h1:tGMin8I49Yij6AQ+rvV+Xa/zwxYQB5hmsd6DkfAx2+A=
 github.com/Azure/go-autorest/autorest/adal v0.9.17 h1:esOPl2dhcz9P3jqBSJ8tPGEj2EqzPPT6zfyuloiogKY=
 github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ=
 github.com/Azure/go-autorest/autorest/azure/auth v0.5.9/go.mod h1:hg3/1yw0Bq87O3KvvnJoAh34/0zbP7SFizX/qN5JvjU=
@@ -105,9 +109,12 @@ github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+Z
 github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
 github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo=
 github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
+github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
+github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
+github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
+github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
 github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
 github.com/GeertJohan/go.incremental v1.0.0/go.mod h1:6fAjUhbVuX1KcMD3c8TEgVUqmo4seqhv0i0kdATSkM0=
 github.com/GeertJohan/go.rice v1.0.2/go.mod h1:af5vUNlDNkCjOZeSGFgIJxDje9qdjsO6hshx0gTmZt4=
@@ -115,13 +122,26 @@ github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae
 github.com/HdrHistogram/hdrhistogram-go v1.1.0/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
 github.com/HdrHistogram/hdrhistogram-go v1.1.2/go.mod h1:yDgFjdqOqDEKOvasDdhWNXYg9BVp4O+o5f6V/ehm6Oo=
 github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0=
+github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp8u+gxLtPgKGjk5hCxuy2hrRejBTA9xFU=
+github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E=
 github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
+github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI=
+github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU=
 github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
+github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
 github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
 github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
+github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc=
+github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs=
 github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig v2.22.0+incompatible h1:z4yfnGrZ7netVz+0EDJ0Wi+5VZCSYp4Z0m2dk6cEM60=
 github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o=
+github.com/Masterminds/sprig/v3 v3.2.2 h1:17jRggJu518dr3QaafizSXOjKYp94wKfABxUmyxvxX8=
+github.com/Masterminds/sprig/v3 v3.2.2/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk=
+github.com/Masterminds/squirrel v1.5.2 h1:UiOEi2ZX4RCSkpiNDQN5kro/XIBpSRk9iTqdIRPzUXE=
+github.com/Masterminds/squirrel v1.5.2/go.mod h1:NNaOrjSoIDfDA40n7sr2tPNZRfjzjA400rg+riTZj10=
+github.com/Masterminds/vcs v1.13.1/go.mod h1:N09YCmOQr6RLxC6UNHzuVwAdodYbbnycGHSmwVJjcKA=
 github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
 github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA=
 github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw=
@@ -130,6 +150,7 @@ github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugX
 github.com/Microsoft/go-winio v0.4.17-0.20210211115548-6eac466e5fa3/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17-0.20210324224401-5516f17a5958/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/go-winio v0.4.17/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
+github.com/Microsoft/go-winio v0.5.1 h1:aPJp2QD7OOrhO5tQXqQoGSJc+DjDtWTGLOmNyAm6FgY=
 github.com/Microsoft/go-winio v0.5.1/go.mod h1:JPGBdM1cNvN/6ISo+n8V5iA4v8pBzdOpzfwIujj1a84=
 github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
 github.com/Microsoft/hcsshim v0.8.7-0.20190325164909-8abdbb8205e4/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg=
@@ -140,16 +161,22 @@ github.com/Microsoft/hcsshim v0.8.15/go.mod h1:x38A4YbHbdxJtc0sF6oIz+RG0npwSCAvn
 github.com/Microsoft/hcsshim v0.8.16/go.mod h1:o5/SZqmR7x9JNKsW3pu+nqHm0MF8vbA+VxGOoXdC600=
 github.com/Microsoft/hcsshim v0.8.20/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
 github.com/Microsoft/hcsshim v0.8.21/go.mod h1:+w2gRZ5ReXQhFOrvSQeNfhrYB/dg3oDwTOcER2fw4I4=
+github.com/Microsoft/hcsshim v0.8.23/go.mod h1:4zegtUJth7lAvFyc6cH2gGQ5B3OFQim01nnU2M8jKDg=
+github.com/Microsoft/hcsshim v0.9.1 h1:VfDCj+QnY19ktX5TsH22JHcjaZ05RWQiwDbOyEg5ziM=
+github.com/Microsoft/hcsshim v0.9.1/go.mod h1:Y/0uV2jUab5kBI7SQgl62at0AVX7uaruzADAVmxm3eM=
 github.com/Microsoft/hcsshim/test v0.0.0-20201218223536-d3e5debf77da/go.mod h1:5hlzMzRKMLyo42nCZ9oml8AdTlq/0cvIaBv6tK1RehU=
 github.com/Microsoft/hcsshim/test v0.0.0-20210227013316-43a75bb4edd3/go.mod h1:mw7qgWloBUl75W/gVH3cQszUg1+gUITj7D6NY7ywVnY=
 github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
 github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
+github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
 github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
 github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
+github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
 github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
+github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo=
 github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
@@ -210,6 +237,7 @@ github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN
 github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
 github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
+github.com/aws/aws-sdk-go v1.34.9/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
 github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48=
 github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
 github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g=
@@ -264,10 +292,15 @@ github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM
 github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
 github.com/bshuster-repo/logrus-logstash-hook v0.4.1/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
+github.com/bshuster-repo/logrus-logstash-hook v1.0.0 h1:e+C0SB5R1pu//O4MQ3f9cFuPGoOVeF2fE4Og9otCc70=
+github.com/bshuster-repo/logrus-logstash-hook v1.0.0/go.mod h1:zsTqEiSzDgAa/8GZR7E1qaXrhYNDKBYy5/dWPTIflbk=
 github.com/buger/jsonparser v0.0.0-20180808090653-f4dd9f5a6b44/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
 github.com/buger/jsonparser v1.1.1/go.mod h1:6RYKKt7H4d4+iWqouImQ9R2FZql3VbhNgx27UK13J/0=
+github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd h1:rFt+Y/IK1aEZkEHchZRSq9OQbsSzIT/OrI8YFFmRIng=
 github.com/bugsnag/bugsnag-go v0.0.0-20141110184014-b1d153021fcd/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
+github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
+github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw=
 github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo=
@@ -289,12 +322,16 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5 h1:7aWHqerlJ41y6FOsEUvknqgXnGmJyJSbjhAWq5pO4F8=
+github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
 github.com/christarazi/controller-tools v0.6.2/go.mod h1:oaeGpjXn6+ZSEIQkUe/+3I40PNiDYp9aeawbt3xTgJ8=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
 github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
+github.com/cilium/charts v0.0.0-20220221222012-aeec68db76ca h1:CbweYxYgvlat11ToUW3HNitmA8Z+jEUfsAIR20kYHOI=
+github.com/cilium/charts v0.0.0-20220221222012-aeec68db76ca/go.mod h1:M3C9VOlFvRzuV+a01t07Tw4uFLSfkCH3L542IWjf6BU=
 github.com/cilium/cilium v1.11.0-rc3/go.mod h1:saQ+u3UA8vXGwFvcYpFTTFvhmJtfRiD74B/+yUSs0qE=
 github.com/cilium/cilium v1.11.1 h1:Q6GRmmZxADW/GxaYKJc2vo2z5n8haBxav3pLCgzjsdI=
 github.com/cilium/cilium v1.11.1/go.mod h1:xdLxOE/3FhilCPBFrOyhuyUzze3siRoIWmEQ2uJGHQk=
@@ -360,6 +397,8 @@ github.com/containerd/cgroups v0.0.0-20200710171044-318312a37340/go.mod h1:s5q4S
 github.com/containerd/cgroups v0.0.0-20200824123100-0b889c03f102/go.mod h1:s5q4SojHctfxANBDvMeIaIovkq29IP48TKAxnhYRxvo=
 github.com/containerd/cgroups v0.0.0-20210114181951-8a68de567b68/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE=
 github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU=
+github.com/containerd/cgroups v1.0.2 h1:mZBclaSgNDfPWtfhj2xJY28LZ9nYIgzB0pwSURPl6JM=
+github.com/containerd/cgroups v1.0.2/go.mod h1:qpbpJ1jmlqsR9f2IyaLPsdkCdnt0rbDVqIDlhuu5tRY=
 github.com/containerd/console v0.0.0-20180822173158-c12b1e7919c1/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20181022165439-0650fd9eeb50/go.mod h1:Tj/on1eG8kiEhd0+fhSDzsPAFESxzBBvdyEgyryXffw=
 github.com/containerd/console v0.0.0-20191206165004-02ecf6a7291e/go.mod h1:8Pf4gM6VEbTNRIT26AyyU7hxdQU3MvAvxVI0sc00XBE=
@@ -373,12 +412,15 @@ github.com/containerd/containerd v1.3.2/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMX
 github.com/containerd/containerd v1.4.0-beta.2.0.20200729163537-40b22ef07410/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.1/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.4.3/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
+github.com/containerd/containerd v1.4.9/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA=
 github.com/containerd/containerd v1.5.0-beta.1/go.mod h1:5HfvG1V2FsKesEGQ17k5/T7V960Tmcumvqn8Mc+pCYQ=
 github.com/containerd/containerd v1.5.0-beta.3/go.mod h1:/wr9AVtEM7x9c+n0+stptlo/uBBoBORwEx6ardVcmKU=
 github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09ZvgqEq8EfBp/m3lcVZIvPHhI=
 github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s=
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
+github.com/containerd/containerd v1.5.9 h1:rs6Xg1gtIxaeyG+Smsb/0xaSDu1VgFhOCKBXxMxbsF4=
+github.com/containerd/containerd v1.5.9/go.mod h1:fvQqCfadDGga5HZyn3j4+dx56qj2I9YwBrlSdalvJYQ=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -406,11 +448,13 @@ github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJ
 github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
+github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
 github.com/containerd/ttrpc v1.0.1/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
 github.com/containerd/ttrpc v1.0.2/go.mod h1:UAxOpgT9ziI0gJrmKvgcZivgxOp8iFPSk8httJEt98Y=
+github.com/containerd/ttrpc v1.1.0/go.mod h1:XX4ZTnoOId4HklF4edwc4DcqskFZuvXB1Evzy5KFQpQ=
 github.com/containerd/typeurl v0.0.0-20180627222232-a93fcdb778cd/go.mod h1:Cm3kwCdlkCfMSHURc+r6fwoGH6/F1hH3S4sg0rLFWPc=
 github.com/containerd/typeurl v0.0.0-20190911142611-5eb25027c9fd/go.mod h1:GeKYzf2pQcqv7tJ0AoCuuhtnqhva5LNU3U+OyKxxJpk=
 github.com/containerd/typeurl v1.0.1/go.mod h1:TB1hUtrpaiO88KEK56ijojHS1+NeF0izUACaJW2mdXg=
@@ -459,18 +503,24 @@ github.com/cpuguy83/go-md2man/v2 v2.0.1 h1:r/myEWzV9lfsM1tFLgDyu0atFtJ1fXn261LKY
 github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
 github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw=
 github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
 github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
+github.com/cyphar/filepath-securejoin v0.2.3 h1:YX6ebbZCZP7VkM3scTTokDgBL2TY741X51MTk3ycuNI=
+github.com/cyphar/filepath-securejoin v0.2.3/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
 github.com/d2g/dhcp4 v0.0.0-20170904100407-a1d1b6c41b1c/go.mod h1:Ct2BUK8SB0YC1SMSibvLzxjeJLnrYEVLULFNiHY9YfQ=
 github.com/d2g/dhcp4client v1.0.0/go.mod h1:j0hNfjhrt2SxUOw55nL0ATM/z4Yt3t2Kd1mW34z5W5s=
 github.com/d2g/dhcp4server v0.0.0-20181031114812-7d4a0a7f59a5/go.mod h1:Eo87+Kg/IX2hfWJfwxMzLyuSZyxSoAug2nGa1G2QAi8=
 github.com/d2g/hardwareaddr v0.0.0-20190221164911-e7d9fbe030e4/go.mod h1:bMl4RjIciD2oAxI7DmWRx6gbeqrkoLqv3MV0vzNad+I=
 github.com/daaku/go.zipexe v1.0.0/go.mod h1:z8IiR6TsVLEYKwXAoE/I+8ys/sDkgTzSL0CLnGVd57E=
 github.com/daaku/go.zipexe v1.0.1/go.mod h1:5xWogtqlYnfBXkSB1o9xysukNP9GTvaNkqzUZbt3Bw8=
+github.com/danieljoos/wincred v1.1.0/go.mod h1:XYlo+eRTsVA9aHGp7NGjFkPla4m+DCL7hqDjlFjiygg=
 github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/daviddengcn/go-colortext v0.0.0-20160507010035-511bcaf42ccd/go.mod h1:dv4zxwHi5C/8AeI+4gX4dCWOIvNi7I6JCSX0HvlKPgE=
+github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/denverdino/aliyungo v0.0.0-20190125010748-a747050bb1ba/go.mod h1:dV8lFg6daOBZbT6/BDGIz6Y3WFGn8juu6G+CQ6LHtl0=
 github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY=
 github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
@@ -479,19 +529,36 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn
 github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
 github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8=
 github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE=
+github.com/distribution/distribution/v3 v3.0.0-20211118083504-a29a3c99a684 h1:DBZ2sN7CK6dgvHVpQsQj4sRMCbWTmd17l+5SUCjnQSY=
+github.com/distribution/distribution/v3 v3.0.0-20211118083504-a29a3c99a684/go.mod h1:UfCu3YXJJCI+IdnqGgYP82dk2+Joxmv+mUTVBES6wac=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
+github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.11+incompatible h1:tXU1ezXcruZQRrMP8RN2z9N91h+6egZTS1gsPsKantc=
+github.com/docker/cli v20.10.11+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
+github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker v20.10.11+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v20.10.12+incompatible h1:CEeNmFM0QZIsJCZKMkZx0ZcahTiewkrgiwfYD+dfl1U=
+github.com/docker/docker v20.10.12+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
+github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
+github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
+github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
 github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
 github.com/docker/go-events v0.0.0-20170721190031-9461782956ad/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
+github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c h1:+pKlWGMw7gf6bQ+oDZB4KHQFypsfjYlq/C4rfL7D3g8=
 github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c/go.mod h1:Uw6UezgYA44ePAFQYUehOuCzmy5zmg/+nl2ZfMWGkpA=
 github.com/docker/go-metrics v0.0.0-20180209012529-399ea8c73916/go.mod h1:/u0gXw0Gay3ceNrsHubL3BtdOL2fHf93USgMTe0W5dI=
+github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8=
 github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw=
 github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
 github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
 github.com/docker/libnetwork v0.8.0-dev.2.0.20210525090646-64b7a4574d14/go.mod h1:93m0aTqz6z+g32wla4l4WxTrdtvBRmVzYRkYvasA5Z8=
+github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1 h1:ZClxb8laGDf5arXfYcAtECDFgAgHklGI8CxgjHnXKJ4=
 github.com/docker/libtrust v0.0.0-20150114040149-fa567046d9b1/go.mod h1:cyGadeNEkKy96OOhEzfZl+yxihPEzKnqJwvfuSUqbZE=
 github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
 github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
@@ -534,11 +601,15 @@ github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQL
 github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
 github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCvpL6mnFh5mB2/l16U=
 github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
+github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d h1:105gxyaGwCFad8crR9dcMQWvV9Hvulu6hwUh4tWPJnM=
+github.com/exponent-io/jsonpath v0.0.0-20151013193312-d6023ce2651d/go.mod h1:ZZMPRZwes7CROmyNKgQzC3XPs6L/G2EJLHddWejkmf4=
+github.com/fatih/camelcase v1.0.0/go.mod h1:yN2Sb0lFhZJUdVvtELVWefmrXpuZESvPmqwoZc+/fpc=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
 github.com/fatih/color v1.12.0/go.mod h1:ELkj/draVOlAH/xkhN6mQ50Qd0MPOk5AAr3maGEBuJM=
 github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w=
 github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk=
+github.com/felixge/httpsnoop v1.0.1 h1:lvB5Jl89CsZtGIWuTcDM1E/vkVs49/Ml7JJe07l8SPQ=
 github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
 github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
 github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
@@ -559,6 +630,7 @@ github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXt
 github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o=
 github.com/fullstorydev/grpcurl v1.8.1 h1:Pp648wlTTg3OKySeqxM5pzh8XF6vLqrm8wRq66+5Xo0=
 github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw=
+github.com/fvbommel/sortorder v1.0.1/go.mod h1:uk88iVf1ovNn1iLfgUVU2F9o5eO30ui720w+kxuqRs0=
 github.com/garyburd/redigo v0.0.0-20150301180006-535138d7bcd7/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
 github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
 github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
@@ -616,12 +688,14 @@ github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX
 github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
 github.com/go-openapi/errors v0.20.1 h1:j23mMDtRxMwIobkpId7sWh7Ddcx4ivaoqUbfXx5P+a8=
 github.com/go-openapi/errors v0.20.1/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M=
+github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
 github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
 github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
 github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
+github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg=
 github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I=
 github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
@@ -649,6 +723,7 @@ github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiS
 github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk=
 github.com/go-openapi/runtime v0.21.0 h1:giZ8eT26R+/rx6RX2MkYjZPY8vPYVKDhP/mOazrQHzM=
 github.com/go-openapi/runtime v0.21.0/go.mod h1:aQg+kaIQEn+A2CRSY1TxbM8+sT9g2V3aLc1FbIAnbbs=
+github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
 github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
 github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
 github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY=
@@ -675,6 +750,7 @@ github.com/go-openapi/strfmt v0.20.2/go.mod h1:43urheQI9dNtE5lTZQfuFJvjYJKPrxicA
 github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg=
 github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os=
 github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k=
+github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
 github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
 github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
@@ -699,6 +775,7 @@ github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8w
 github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
+github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
 github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
@@ -723,18 +800,29 @@ github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxs
 github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg=
 github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE=
 github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8=
+github.com/gobuffalo/logger v1.0.3 h1:YaXOTHNPCvkqqA7w05A4v0k2tCdpr+sgFlgINbQ6gqc=
+github.com/gobuffalo/logger v1.0.3/go.mod h1:SoeejUwldiS7ZsyCBphOGURmWdwUFXs0J7TCjEhjKxM=
 github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
 github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc=
 github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
 github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4=
+github.com/gobuffalo/packd v1.0.0 h1:6ERZvJHfe24rfFmA9OaoKBdC7+c9sydrytMg8SdFGBM=
+github.com/gobuffalo/packd v1.0.0/go.mod h1:6VTc4htmJRFB7u1m/4LeMTWjFoYrUiBkU9Fdec9hrhI=
 github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ=
 github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0=
+github.com/gobuffalo/packr/v2 v2.8.1 h1:tkQpju6i3EtMXJ9uoF5GT6kB+LMTimDWD8Xvbz6zDVA=
+github.com/gobuffalo/packr/v2 v2.8.1/go.mod h1:c/PLlOuTU+p3SybaJATW3H6lX/iK7xEz5OeMf+NnJpg=
 github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw=
+github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
+github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8=
 github.com/godbus/dbus v0.0.0-20151105175453-c7fdd8b5cd55/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus v0.0.0-20180201030542-885f9cc04c9c/go.mod h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw=
 github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4=
 github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
 github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
+github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE=
+github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
+github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s=
 github.com/gogo/googleapis v1.2.0/go.mod h1:Njal3psf3qN6dwBtQfUmBZh2ybovJ0tlu3o/AC7HYjU=
 github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c=
@@ -750,6 +838,7 @@ github.com/goji/httpauth v0.0.0-20160601135302-2da839ab0f4d/go.mod h1:nnjvkQ9ptG
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang-jwt/jwt/v4 v4.1.0 h1:XUgk2Ex5veyVFVeLm0xhusUTQybEbexJXrvPNOKkSY0=
 github.com/golang-jwt/jwt/v4 v4.1.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
+github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
 github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGwJL78qG/PmXZO1EjYhfJinVAhrmmHX6Z8B9k=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
 github.com/golang/glog v0.0.0-20210429001901-424d2337a529/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
@@ -796,6 +885,9 @@ github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
 github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
 github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM=
 github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/golangplus/testing v0.0.0-20180327235837-af21d9c3145e/go.mod h1:0AA//k/eakGydO4jKRoRL2j92ZKSzTgj9tclaCrvXHk=
+github.com/gomodule/redigo v1.8.2 h1:H5XSIre1MB5NbPYFp+i1NBbb5qN1W8Y8YAQoAYbkm8k=
+github.com/gomodule/redigo v1.8.2/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0=
 github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
 github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4=
@@ -819,6 +911,7 @@ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-containerregistry v0.5.1/go.mod h1:Ct15B4yir3PLOP5jsy0GNeYVaIZs/MK/Jz5any1wFW0=
 github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM=
 github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
@@ -882,14 +975,19 @@ github.com/goreleaser/goreleaser v0.134.0/go.mod h1:ZT6Y2rSYa6NxQzIsdfWWNWAlYGXG
 github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w=
 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
 github.com/gorilla/handlers v0.0.0-20150720190736-60c7bfde3e33/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ=
+github.com/gorilla/handlers v1.5.1 h1:9lRY6j8DEeeBT10CvO9hGW0gmky0BprnvDI5vfhUHH4=
+github.com/gorilla/handlers v1.5.1/go.mod h1:t8XrUpc4KVXb7HGyJ4/cEnwQiaxrX/hz1Zv/4g96P1Q=
 github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
 github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
+github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
 github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
 github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ=
 github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
 github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
+github.com/gosuri/uitable v0.0.4 h1:IG2xLKRvErL3uhY6e1BylFzG+aJiwQviDDTfOKeKTpY=
+github.com/gosuri/uitable v0.0.4/go.mod h1:tKR86bXuXPZazfOTG1FIzvjIdXzd0mo4Vtn16vt0PJo=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM=
 github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
 github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -957,6 +1055,9 @@ github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpT
 github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
 github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo=
 github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
+github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw=
+github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
 github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg=
 github.com/hudl/fargo v1.4.0/go.mod h1:9Ai6uvFy5fQNq6VPKtg+Ceq1+eTY4nKUlR2JElEOcDo=
 github.com/iancoleman/strcase v0.0.0-20180726023541-3605ed457bf7/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE=
@@ -988,11 +1089,14 @@ github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8
 github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20160803190731-bd40a432e4c7/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
 github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k=
+github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
 github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
 github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
 github.com/jmhodges/clock v0.0.0-20160418191101-880ee4c33548/go.mod h1:hGT6jSUVzF6no3QaDSMLGLEHtHSBSefs+MgcDWnmhmo=
-github.com/jmoiron/sqlx v1.3.3 h1:j82X0bf7oQ27XeqxicSZsTU5suPwKElg3oyxNn43iTk=
 github.com/jmoiron/sqlx v1.3.3/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/jmoiron/sqlx v1.3.4 h1:wv+0IJZfL5z0uZoUjlpKgHkgaFSYD+r9CfrXjEXsO7w=
+github.com/jmoiron/sqlx v1.3.4/go.mod h1:2BljVx/86SuTyjE+aPYlHCTNvZrnJXghYGpNiXLBMCQ=
+github.com/joefitzgerald/rainbow-reporter v0.1.0/go.mod h1:481CNgqmVHQZzdIbN52CupLJyoVwB10FQ/IQlF1pdL8=
 github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg=
 github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
 github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ=
@@ -1028,6 +1132,8 @@ github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8
 github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
 github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4=
 github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA=
+github.com/karrick/godirwalk v1.15.8 h1:7+rWAZPn9zuRxaIqqT8Ohs2Q2Ac0msBqwRdxNCr2VVs=
+github.com/karrick/godirwalk v1.15.8/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk=
 github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/kevinburke/ssh_config v1.1.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
 github.com/keybase/go-ps v0.0.0-20190827175125-91aafc93ba19/go.mod h1:hY+WOq6m2FpbvyrI93sMaypsttvaIL5nhVR92dTMUcQ=
@@ -1051,6 +1157,7 @@ github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
+github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc=
 github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
 github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -1066,14 +1173,23 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
 github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/go-gypsy v1.0.0/go.mod h1:chkXM0zjdpXOiqkCW1XcCHDfjfk14PH2KKkQWxfJUcU=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
+github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw=
+github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o=
+github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk=
+github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw=
 github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
 github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
 github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
+github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/lib/pq v1.10.1/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk=
+github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de h1:9TO3cAIGXtEhnIaL+V+BEER86oLrvS+kWobKpbJuye0=
 github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de/go.mod h1:zAbeS9B/r2mtpb6U+EI2rYA5OAXxsYw6wTamcNW+zcE=
 github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM=
 github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4=
+github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo=
+github.com/lithammer/dedent v1.1.0/go.mod h1:jrXYCQtgg0nJiN+StA2KgR7w6CiQNv9Fd/Z9BP0jIOc=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
 github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
 github.com/lyft/protoc-gen-star v0.5.1/go.mod h1:9toiA3cC7z5uVbODF7kEQ91Xn7XNFkVUl+SrEe+ZORU=
@@ -1083,6 +1199,7 @@ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czP
 github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
 github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
+github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
 github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
@@ -1092,7 +1209,12 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7
 github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
+github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI=
+github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc=
 github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE=
+github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY=
+github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI=
+github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI=
 github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0=
 github.com/marstr/guid v1.1.0/go.mod h1:74gB1z2wpxxInTG6yaqA7KrtM0NZ+RbrcqDvYHefzho=
 github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
@@ -1115,20 +1237,24 @@ github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOA
 github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
 github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
 github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
+github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI=
 github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
 github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
 github.com/mattn/go-runewidth v0.0.12 h1:Y41i/hVW3Pgwr8gV+J23B9YEY0zxjptBuCWEaxmAOow=
 github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
 github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
+github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o=
 github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y=
 github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
+github.com/mattn/go-sqlite3 v1.14.7 h1:fxWBnXkxfM6sRiuH3bqJ4CfzZojMOLVc0UTsTglEghA=
 github.com/mattn/go-sqlite3 v1.14.7/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU=
 github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo=
 github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI=
 github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
+github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
 github.com/mdlayher/arp v0.0.0-20191213142603-f72070a231fc/go.mod h1:eOj1DDj3NAZ6yv+WafaKzY37MFZ58TdfIhQ+8nQbiis=
 github.com/mdlayher/ethernet v0.0.0-20190313224307-5b5fc417d966/go.mod h1:5s5p/sMJ6sNsFl6uCh85lkFGV8kLuIYJCRJLavVJwvg=
 github.com/mdlayher/ethernet v0.0.0-20190606142754-0394541c37b7/go.mod h1:U6ZQobyTjI/tJyq2HG+i/dfSoFUt8/aZCM+GKtmFk/Y=
@@ -1162,10 +1288,15 @@ github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT
 github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
 github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
 github.com/mitchellh/cli v1.1.0/go.mod h1:xcISNoH86gajksDmfB23e/pu+B+GeFRMYmoHXxx3xhI=
+github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4=
 github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw=
+github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw=
+github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s=
 github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
 github.com/mitchellh/go-testing-interface v1.0.0/go.mod h1:kRemZodwjscx+RGhAo8eIhFbs2+BFgRtFPeD/KE+zxI=
+github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
+github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
 github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
@@ -1180,14 +1311,20 @@ github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod
h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= +github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= +github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= github.com/moby/sys/mountinfo v0.4.0/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= github.com/moby/sys/mountinfo v0.4.1/go.mod h1:rEr8tzG/lsIZHBtN/JjGG+LMYx9eXgW2JI+6q0qou+A= +github.com/moby/sys/mountinfo v0.5.0 h1:2Ks8/r6lopsxWi9m58nlwjaeSzUX9iiL1vj5qB/9ObI= +github.com/moby/sys/mountinfo v0.5.0/go.mod h1:3bMD3Rg+zkqx8MRYPi7Pyb0Ie97QEBmdxbhnCLlSvSU= github.com/moby/sys/symlink v0.1.0/go.mod h1:GGDODQmbFOjFsXvfLVn3+ZRxkch54RkSiGqsZeMYowQ= github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo= github.com/moby/term v0.0.0-20201216013528-df9cb8a40635/go.mod h1:FBS0z0QWA44HXygs7VXDUOGoN/1TV3RuWkLO04am3wc= +github.com/moby/term v0.0.0-20210610120745-9d4ed1856297 h1:yH0SvLzcbZxcJXho2yh7CqdENGMQe73Cw3woZBpPli0= github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= @@ -1200,6 +1337,7 @@ github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwd github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 h1:n6/2gBQ3RWajuToeY6ZtZTIKv2v7ThUy5KKusIT0yc0= github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00/go.mod h1:Pm3mSP3c5uWn86xMLZ5Sa7JB9GsEZySvHYXCTK4E9q4= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mreiferson/go-httpclient v0.0.0-20160630210159-31f0106b4474/go.mod h1:OQA4XLvDbMgS8P0CevmM4m9Q3Jq4phKUzcocxuGJ5m8= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= @@ -1244,9 +1382,11 @@ github.com/onsi/ginkgo v0.0.0-20151202141238-7f8ab55aaf3b/go.mod h1:lLunBs/Ym6LB github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.1/go.mod 
h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.13.0/go.mod h1:+REjRxOmWfHCjfv9TTWB1jD1Frx4XydAD3zm1lskyM0= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= @@ -1260,6 +1400,7 @@ github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.3/go.mod h1:V9xEwhxec5O8UDM77eCW8vLymOMltsqPVYWrpDsH8xc= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= @@ -1272,9 +1413,12 @@ github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1 github.com/opencontainers/go-digest v0.0.0-20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= github.com/opencontainers/go-digest v1.0.0-rc1.0.20180430190053-c9281466c8b2/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= +github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= github.com/opencontainers/runc v1.0.0-rc8.0.20190926000215-3e425f80a8c9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U= @@ -1326,6 +1470,8 @@ github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+v github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ= github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2 h1:JhzVVoYvbOACxoUmOs6V/G4D5nPVUW73rKvXxP4XUJc= +github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2/go.mod 
h1:iIss55rKnNBTvrwdmkUpLnDpZoAHvWaiq5+iMmen4AE= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -1412,10 +1558,13 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.5.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/rogpeppe/go-internal v1.8.0 h1:FCbCCtXNOY3UtUuHUYaghJg4y7Fd14rXifAYUAtL9R8= github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= +github.com/rubenv/sql-migrate v0.0.0-20210614095031-55d5740dbbcc h1:BD7uZqkN8CpjJtN/tScAKiccBikU4dlqe/gNrkRaPY4= +github.com/rubenv/sql-migrate v0.0.0-20210614095031-55d5740dbbcc/go.mod h1:HFLT6i9iR4QBOF5rdCyjddC9t59ArqWJV2xx+jwcCMo= github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= @@ -1432,6 +1581,7 @@ github.com/sasha-s/go-deadlock v0.3.1/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZj github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw= +github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24q7p+U= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -1442,6 +1592,8 @@ github.com/servak/go-fastping v0.0.0-20160802140958-5718d12e20a0/go.mod h1:udnTW github.com/shirou/gopsutil/v3 v3.21.9/go.mod h1:YWp/H8Qs5fVmf17v7JNZzA0mPJ+mS2e9JdiUF9LlKzQ= github.com/shirou/gopsutil/v3 v3.21.10 h1:flTg1DrnV/UVrBqjLgVgDJzx6lf+91rC64/dBHmO2IA= github.com/shirou/gopsutil/v3 v3.21.10/go.mod h1:t75NhzCZ/dYyPQjyQmrAYP6c8+LCdFANeBMdLPCNnew= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= 
github.com/sirupsen/logrus v1.0.6/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= @@ -1479,6 +1631,7 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= @@ -1587,9 +1740,13 @@ github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+ github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f h1:J9EGpcZtP0E/raorCMxlFGSTBrsSlaDGf3jU/qvAE2c= github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= +github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofmx9yWTog9BfvIu0q41lo= github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2 h1:eY9dn8+vbi4tKz5Qo6v2eYzo7kUS51QINcR5jNpbZS8= @@ -1605,9 +1762,13 @@ github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43 h1:+lm10QQTNSBd8DVTNGHx7o/IKu9HYDvLMffDhbyLccI= github.com/yvasiyarov/go-metrics v0.0.0-20140926110328-57bccd1ccd43/go.mod h1:aX5oPXxHm3bOH+xeAttToC8pqch2ScQN/JoXYupl6xs= +github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50 h1:hlE8//ciYMztlGpl/VA+Zm1AcTPHYkHJPbHqE6WJUXE= github.com/yvasiyarov/gorelic v0.0.0-20141212073537-a9bba5b9ab50/go.mod h1:NUSPSUX/bi6SeDMUh6brw0nXpxHnc96TguQh0+r/ssA= +github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f 
h1:ERexzlUfuTvpE74urLSbIQW0Z/6hF9t8U4NsJLaioAY= github.com/yvasiyarov/newrelic_platform_go v0.0.0-20140908184405-b21fdbd4370f/go.mod h1:GlGEuHIJweS1mbCqG+7vt2nvWLzLLnRHbXz5JKd/Qbg= +github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= github.com/zmap/rc2 v0.0.0-20131011165748-24b9757f5521/go.mod h1:3YZ9o3WnatTIZhuOtot4IcUfzoKVjUHqu6WALIyI0nE= github.com/zmap/zcertificate v0.0.0-20180516150559-0e3d58b1bac4/go.mod h1:5iU54tB79AMBcySS0R2XIyZBAVmeHranShAFELYx7is= @@ -1685,6 +1846,7 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= +go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opentelemetry.io/contrib v0.20.0 h1:ubFQUn0VCZ0gPwIoJfBJVpeBlyRMxu8Mm/huKWYd9p0= go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= @@ -1746,6 +1908,7 @@ golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190320223903-b7391e95e576/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1758,10 +1921,13 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20191122220453-ac88ee75c92c/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201124201722-c8d3bf9c5392/go.mod 
h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= @@ -1771,8 +1937,9 @@ golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5 golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1895,8 +2062,11 @@ golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210927181540-4e4d966f7476/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210928044308-7d9f5e0b762b/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20211101193420-4a448f8816b3 h1:VrJZAjbekhoRn7n5FBujY31gboH+iB3pdLxn3gE9FjU= golang.org/x/net v0.0.0-20211101193420-4a448f8816b3/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d h1:62NvYBuaanGXR2ZOfwDFkhhl6X1DUgf8qg3GuQvxZsE= +golang.org/x/net v0.0.0-20220107192237-5cfca573fb4d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -1968,6 +2138,7 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190812073006-9eafafc0a87e/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -2072,10 +2243,12 @@ golang.org/x/sys v0.0.0-20210917161153-d61c044b1678/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211103184734-ae416a5f93c7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211205182925-97ca703d548d h1:FjkYO/PPp4Wi0EAUOVLxePm7qVW4r4ctbWpURyuOD0E= golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -2105,6 +2278,7 @@ golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2128,6 +2302,7 @@ golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190706070813-72ffa07ba3db/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2155,6 +2330,7 @@ golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod 
h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200308013534-11ec41452d41/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200324175852-6fb6f5a9fc59/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= @@ -2164,6 +2340,7 @@ golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= @@ -2171,6 +2348,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -2284,6 +2462,7 @@ google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2329,8 +2508,9 @@ google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ6 google.golang.org/genproto v0.0.0-20211129164237-f09f9a12af12/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211203200212-54befc351ae9/go.mod 
h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa h1:I0YcKz0I7OAhddo7ya8kMnvprhcWM045PmkBdMO9zN0= google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368 h1:Et6SkiuvnBn+SgrSYXs/BrUpGB4mbdwt4R3vaPIlicA= +google.golang.org/genproto v0.0.0-20220107163113-42d7afdf6368/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= @@ -2367,6 +2547,7 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg= google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= @@ -2401,6 +2582,8 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= +gopkg.in/gorp.v1 v1.7.2 h1:j3DWlAyGVv8whO7AcIWznQ2Yj7yJkn34B8s63GViAAw= +gopkg.in/gorp.v1 v1.7.2/go.mod h1:Wo3h+DBQZIxATwftsglhdD/62zRFPhGhTiu5jUJmCaw= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= @@ -2436,9 +2619,13 @@ gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= +gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= +helm.sh/helm/v3 v3.8.0 h1:vlQQDDQkrH4NECOFbGcwjjKyHL5Sa3xNLjMxXm7fMVo= +helm.sh/helm/v3 v3.8.0/go.mod h1:0nYPSuvuj8TTJDLRSAfbzGGbazPZsayaDpP8s9FfZT8= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2455,11 +2642,14 @@ k8s.io/api v0.20.6/go.mod h1:X9e8Qag6JV/bL5G6bU8sdVRltWKmdHsFUGS3eVndqE8= k8s.io/api v0.21.3/go.mod h1:hUgeYHUbBp23Ue4qdX9tR8/ANi/g3ehylAqDn9NWVOg= k8s.io/api v0.22.3/go.mod h1:azgiXFiXqiWyLCfI62/eYBOu19rj2LKmIhFPP4+33fs= k8s.io/api v0.23.0/go.mod h1:8wmDdLBHBNxtOIytwLstXt5E9PddnZb0GaMcqsvDBpg= +k8s.io/api v0.23.1/go.mod h1:WfXnOnwSqNtG62Y1CdjoMxh7r7u9QXGCkA1u0na2jgo= k8s.io/api v0.24.0-alpha.0 h1:M/31Z1GKgTE0/fWu2/arAfvIoMmL/oQ5ykYN9RTzgKg= k8s.io/api v0.24.0-alpha.0/go.mod h1:deWo+4OwHTtia+zwLZirxNA0Unp1Y/a4lsmydSRUqp8= k8s.io/apiextensions-apiserver v0.21.3/go.mod h1:kl6dap3Gd45+21Jnh6utCx8Z2xxLm8LGDkprcd+KbsE= k8s.io/apiextensions-apiserver v0.22.3/go.mod h1:f4plF+CXeqI89jAXL0Ml4LI/kSAZ54JS94+XOX1sae8= k8s.io/apiextensions-apiserver v0.23.0/go.mod h1:xIFAEEDlAZgpVBl/1VSjGDmLoXAWRG40+GsWhKhAxY4= +k8s.io/apiextensions-apiserver v0.23.1 h1:xxE0q1vLOVZiWORu1KwNRQFsGWtImueOrqSl13sS5EU= +k8s.io/apiextensions-apiserver v0.23.1/go.mod h1:0qz4fPaHHsVhRApbtk3MGXNn2Q9M/cVWWhfHdY2SxiM= k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= k8s.io/apimachinery v0.20.4/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU= @@ -2467,6 +2657,7 @@ k8s.io/apimachinery v0.20.6/go.mod h1:ejZXtW1Ra6V1O5H8xPBGz+T3+4gfkTCeExAHKU57MA k8s.io/apimachinery v0.21.3/go.mod h1:H/IM+5vH9kZRNJ4l3x/fXP/5bOPJaVP/guptnZPeCFI= k8s.io/apimachinery v0.22.3/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0= k8s.io/apimachinery v0.23.0/go.mod h1:fFCTTBKvKcwTPFzjlcxp91uPFZr+JA0FubU4fLzzFYc= +k8s.io/apimachinery v0.23.1/go.mod h1:SADt2Kl8/sttJ62RRsi9MIV4o8f5S3coArm0Iu3fBno= k8s.io/apimachinery v0.24.0-alpha.0 h1:FChRBdwz3dJDpfIBXaSRlDQQ6FOHiRvgAHaRAynx73s= k8s.io/apimachinery v0.24.0-alpha.0/go.mod h1:SqloDTPqePPNhEp8K4qUgqpKc3tE+ymn05iIUbSAQ7g= k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU= @@ -2475,6 +2666,9 @@ k8s.io/apiserver v0.20.6/go.mod h1:QIJXNt6i6JB+0YQRNcS0hdRHJlMhflFmsBDeSgT1r8Q= k8s.io/apiserver v0.21.3/go.mod h1:eDPWlZG6/cCCMj/JBcEpDoK+I+6i3r9GsChYBHSbAzU= k8s.io/apiserver v0.22.3/go.mod h1:oam7lH/F1Kto/WTamyQYrD68fS0mGUBORAFf6x/9Mxs= k8s.io/apiserver v0.23.0/go.mod h1:Cec35u/9zAepDPPFyT+UMrgqOCjgJ5qtfVJDxjZYmt4= +k8s.io/apiserver v0.23.1 h1:vWGf8LcV9Pk/z5rdLmCiBDqE21ccbe930dzrtVMhw9g= +k8s.io/apiserver v0.23.1/go.mod h1:Bqt0gWbeM2NefS8CjWswwd2VNAKN6lUKR85Ft4gippY= +k8s.io/cli-runtime v0.23.1/go.mod h1:r9r8H/qfXo9w+69vwUL7LokKlLRKW5D6A8vUKCx+YL0= k8s.io/cli-runtime v0.24.0-alpha.0 h1:o3xx1RrdTLH6dwlT0KntsWaueiOyI7Ov+vY5+4TL6Bs= k8s.io/cli-runtime v0.24.0-alpha.0/go.mod h1:9tFZCnHDd5u56V2j4jj4M/ql7bAj09adJ8TdY608DdI= k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y= @@ -2484,23 +2678,31 @@ k8s.io/client-go v0.20.6/go.mod h1:nNQMnOvEUEsOzRRFIIkdmYOjAZrC8bgq0ExboWSU1I0= k8s.io/client-go v0.21.3/go.mod h1:+VPhCgTsaFmGILxR/7E1N0S+ryO010QBeNCv5JwRGYU= k8s.io/client-go v0.22.3/go.mod h1:ElDjYf8gvZsKDYexmsmnMQ0DYO8W9RwBjfQ1PI53yow= k8s.io/client-go v0.23.0/go.mod h1:hrDnpnK1mSr65lHHcUuIZIXDgEbzc7/683c6hyG4jTA= +k8s.io/client-go v0.23.1/go.mod h1:6QSI8fEuqD4zgFK0xbdwfB/PthBsIxCJMa3s17WlcO0= k8s.io/client-go v0.24.0-alpha.0 h1:qeU8M+gy+bTDzJQMMy6VsvYXtNJAPkP0QvJw/Cl7gnA= k8s.io/client-go v0.24.0-alpha.0/go.mod h1:tQEmSne6/r+o6F5m9q0zx2R7zgm9xXmnNuIgsUWcMxw= 
+k8s.io/code-generator v0.19.7/go.mod h1:lwEq3YnLYb/7uVXLorOJfxg+cUu2oihFhHZ0n9NIla0= k8s.io/code-generator v0.21.3/go.mod h1:K3y0Bv9Cz2cOW2vXUrNZlFbflhuPvuadW6JdnN6gGKo= k8s.io/code-generator v0.22.3/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o= k8s.io/code-generator v0.23.0/go.mod h1:vQvOhDXhuzqiVfM/YHp+dmg10WDZCchJVObc9MvowsE= +k8s.io/code-generator v0.23.1/go.mod h1:V7yn6VNTCWW8GqodYCESVo95fuiEg713S8B7WacWZDA= k8s.io/component-base v0.20.1/go.mod h1:guxkoJnNoh8LNrbtiQOlyp2Y2XFCZQmrcg2n/DeYNLk= k8s.io/component-base v0.20.4/go.mod h1:t4p9EdiagbVCJKrQ1RsA5/V4rFQNDfRlevJajlGwgjI= k8s.io/component-base v0.20.6/go.mod h1:6f1MPBAeI+mvuts3sIdtpjljHWBQ2cIy38oBIWMYnrM= k8s.io/component-base v0.21.3/go.mod h1:kkuhtfEHeZM6LkX0saqSK8PbdO7A0HigUngmhhrwfGQ= k8s.io/component-base v0.22.3/go.mod h1:kuybv1miLCMoOk3ebrqF93GbQHQx6W2287FC0YEQY6s= k8s.io/component-base v0.23.0/go.mod h1:DHH5uiFvLC1edCpvcTDV++NKULdYYU6pR9Tt3HIKMKI= +k8s.io/component-base v0.23.1 h1:j/BqdZUWeWKCy2v/jcgnOJAzpRYWSbGcjGVYICko8Uc= +k8s.io/component-base v0.23.1/go.mod h1:6llmap8QtJIXGDd4uIWJhAq0Op8AtQo6bDW2RrNMTeo= +k8s.io/component-helpers v0.23.1/go.mod h1:ZK24U+2oXnBPcas2KolLigVVN9g5zOzaHLkHiQMFGr0= k8s.io/cri-api v0.17.3/go.mod h1:X1sbHmuXhwaHs9xxYffLqJogVsnI+f6cPRcgPel7ywM= k8s.io/cri-api v0.20.1/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.4/go.mod h1:2JRbKt+BFLTjtrILYVqQK5jqhI+XNdF6UiGMgczeBCI= k8s.io/cri-api v0.20.6/go.mod h1:ew44AjNXwyn1s0U4xCKGodU7J1HzBeZ1MpGrpa5r8Yc= k8s.io/gengo v0.0.0-20200205140755-e0e292d8aa12/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20200428234225-8167cfdcfc14/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/gengo v0.0.0-20201113003025-83324d819ded/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/klog v0.3.1/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= @@ -2513,18 +2715,24 @@ k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec= k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw= k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM= k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE= k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw= k8s.io/kube-openapi v0.0.0-20211029090450-ec1f4c89925a/go.mod h1:9aku95Vs/jVMtCgHMT4ILvnQ1TvDBztPz+yZ/XAlpkw= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4= k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk= +k8s.io/kubectl v0.23.1 h1:gmscOiV4Y4XIRIn14gQBBADoyyVrDZPbxRCTDga4RSA= +k8s.io/kubectl v0.23.1/go.mod h1:Ui7dJKdUludF8yWAOSN7JZEkOuYixX5yF6E6NjoukKE= k8s.io/kubernetes v1.13.0/go.mod h1:ocZa8+6APFNC2tX1DZASIbocyYT5jHzqFVsY5aoB7Jk= +k8s.io/metrics v0.23.1/go.mod 
h1:qXvsM1KANrc+ZZeFwj6Phvf0NLiC+d3RwcsLcdGc+xs= k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b h1:wxEMGetGMur3J1xuGLQY7GEQYg9bZxKn3tKo5k/eYcs= k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +oras.land/oras-go v1.1.0 h1:tfWM1RT7PzUwWphqHU6ptPU3ZhwVnSw/9nEGf519rYg= +oras.land/oras-go v1.1.0/go.mod h1:1A7vR/0KknT2UkJVWh+xMi95I/AhK8ZrxrnUSmXN0bQ= pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/goversion v1.2.0/go.mod h1:Eih9y/uIBS3ulggl7KNJ09xGSLcuNaLgmvvqa07sgfo= @@ -2540,8 +2748,11 @@ sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtud sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs= sigs.k8s.io/kustomize/api v0.10.1 h1:KgU7hfYoscuqag84kxtzKdEC3mKMb99DPI3a0eaV1d0= sigs.k8s.io/kustomize/api v0.10.1/go.mod h1:2FigT1QN6xKdcnGS2Ppp1uIWrtWN28Ms8A3OZUZhwr8= +sigs.k8s.io/kustomize/cmd/config v0.10.2/go.mod h1:K2aW7nXJ0AaT+VA/eO0/dzFLxmpFcTzudmAgDwPY1HQ= +sigs.k8s.io/kustomize/kustomize/v4 v4.4.1/go.mod h1:qOKJMMz2mBP+vcS7vK+mNz4HBLjaQSWRY22EF6Tb7Io= sigs.k8s.io/kustomize/kyaml v0.13.0 h1:9c+ETyNfSrVhxvphs+K2dzT3dh5oVPPEqPOE/cUpScY= sigs.k8s.io/kustomize/kyaml v0.13.0/go.mod h1:FTJxEZ86ScK184NpGSAQcfEqee0nul8oLCK30D47m4E= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.0.3/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= diff --git a/install/azure.go b/install/azure.go index 3d37015799..7b2b4f94a7 100644 --- a/install/azure.go +++ b/install/azure.go @@ -7,6 +7,14 @@ import ( "context" "encoding/json" "fmt" + + "github.com/cilium/cilium/pkg/versioncheck" + "helm.sh/helm/v3/pkg/strvals" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" + + "github.com/cilium/cilium-cli/defaults" ) type azureVersionValidation struct{} @@ -183,3 +191,64 @@ func (k *K8sInstaller) azExec(args ...string) ([]byte, error) { args = append(args, "--output", "json", "--only-show-errors") return k.Exec("az", args...) 
}
+
+func (k *K8sInstaller) createAKSSecrets(ctx context.Context) error {
+	// Check if the secret already exists and reuse it
+	_, err := k.client.GetSecret(ctx, k.params.Namespace, defaults.AKZSecretName, metav1.GetOptions{})
+	if err == nil {
+		k.Log("🔑 Found existing AKS secret %s", defaults.AKZSecretName)
+		return nil
+	}
+
+	var (
+		v              = map[string]interface{}{}
+		helmOpts       string
+		secretFileName string
+	)
+
+	ciliumVer := k.getCiliumVersion()
+	switch {
+	case versioncheck.MustCompile(">=1.11.0")(ciliumVer):
+		// strvals expects key=value pairs, so boolean flags need explicit values.
+		helmOpts = fmt.Sprintf("operator.enabled=true,azure.enabled=true,azure.clientID=%s,azure.clientSecret=%s",
+			k.params.Azure.ClientID, k.params.Azure.ClientSecret)
+		secretFileName = "templates/cilium-operator/secret.yaml"
+	default:
+		// TODO(aanm) fix me
+		panic("unsupported version")
+	}
+
+	err = strvals.ParseInto(helmOpts, v)
+	if err != nil {
+		// Developer mistake, this shouldn't happen
+		panic(err)
+	}
+	// Chart rendering can fail at runtime, so return the error rather than panicking.
+	rel, err := k.helmClient.RunWithContext(ctx, k.helmChart, v)
+	if err != nil {
+		return fmt.Errorf("unable to render helm chart: %w", err)
+	}
+
+	secretFile, err := FilterManifests(rel.Manifest, []string{secretFileName})
+	if err != nil {
+		return fmt.Errorf("unable to filter helm manifests: %w", err)
+	}
+
+	var secret corev1.Secret
+	err = yaml.Unmarshal([]byte(secretFile[secretFileName]), &secret)
+	if err != nil {
+		// Developer mistake, this shouldn't happen
+		panic(err)
+	}
+
+	k.Log("🔑 Generated AKS secret %s", defaults.AKZSecretName)
+	_, err = k.client.CreateSecret(ctx, k.params.Namespace, &secret, metav1.CreateOptions{})
+	if err != nil {
+		return fmt.Errorf("unable to create AKS secret %s/%s: %w", k.params.Namespace, defaults.AKZSecretName, err)
+	}
+	k.pushRollbackStep(func(ctx context.Context) {
+		if err := k.client.DeleteSecret(ctx, k.params.Namespace, defaults.AKZSecretName, metav1.DeleteOptions{}); err != nil {
+			k.Log("Cannot delete %s Secret: %s", defaults.AKZSecretName, err)
+		}
+	})
+
+	return nil
+}
diff --git a/install/helm.go b/install/helm.go
new file mode 100644
index 0000000000..af88101368
--- /dev/null
+++ b/install/helm.go
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright 2020 Authors of Cilium
+// Copyright The Helm Authors.
+
+package install
+
+import (
+	"bytes"
+	"fmt"
+	"path/filepath"
+	"regexp"
+	"sort"
+	"strings"
+
+	"helm.sh/helm/v3/pkg/releaseutil"
+)
+
+// FilterManifests returns a map of generated manifests. The key is the
+// filename and the value is its manifest.
+func FilterManifests(manifest string, showFiles []string) (map[string]string, error) {
+	// If we have a list of files to render, check that each of the
+	// provided files exists in the chart.
+	if len(showFiles) == 0 {
+		return nil, nil
+	}
+	// This is necessary to ensure consistent manifest ordering when using --show-only
+	// with globs or directory names.
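+	// releaseutil.SplitManifests breaks the rendered multi-document YAML
+	// stream into a map keyed "manifest-N"; sorting those keys with
+	// releaseutil.BySplitManifestsOrder (by the numeric suffix) keeps the
+	// documents in their original render order.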
+	var manifests bytes.Buffer
+	fmt.Fprintln(&manifests, strings.TrimSpace(manifest))
+
+	splitManifests := releaseutil.SplitManifests(manifests.String())
+	manifestsKeys := make([]string, 0, len(splitManifests))
+	for k := range splitManifests {
+		manifestsKeys = append(manifestsKeys, k)
+	}
+	sort.Sort(releaseutil.BySplitManifestsOrder(manifestsKeys))
+
+	manifestNameRegex := regexp.MustCompile("# Source: [^/]+/(.+)")
+	var (
+		manifestsToRender = map[string]string{}
+	)
+
+	for _, f := range showFiles {
+		missing := true
+		// Use linux-style filepath separators to unify the user's input path
+		f = filepath.ToSlash(f)
+		for _, manifestKey := range manifestsKeys {
+			manifest := splitManifests[manifestKey]
+			submatch := manifestNameRegex.FindStringSubmatch(manifest)
+			if len(submatch) == 0 {
+				continue
+			}
+			manifestName := submatch[1]
+			// The manifest name is rendered using linux-style filepath
+			// separators on Windows as well as macOS/linux.
+			manifestPathSplit := strings.Split(manifestName, "/")
+			// The manifest path is joined using linux-style filepath
+			// separators on Windows as well as macOS/linux.
+			manifestPath := strings.Join(manifestPathSplit, "/")
+
+			// If the filepath provided matches a manifest path in the
+			// chart, render that manifest
+			if matched, _ := filepath.Match(f, manifestPath); !matched {
+				continue
+			}
+			manifestsToRender[f] = manifest
+			missing = false
+		}
+		if missing {
+			return nil, fmt.Errorf("could not find template %s in chart", f)
+		}
+	}
+	return manifestsToRender, nil
+}
diff --git a/install/install.go b/install/install.go
index b19158906c..ff46cfa981 100644
--- a/install/install.go
+++ b/install/install.go
@@ -12,9 +12,14 @@ import (
 	"time"

 	"github.com/blang/semver/v4"
+	ciliumhelm "github.com/cilium/charts"
 	"github.com/cilium/cilium/api/v1/models"
 	ciliumv2 "github.com/cilium/cilium/pkg/k8s/apis/cilium.io/v2"
 	"github.com/cilium/cilium/pkg/versioncheck"
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/strvals"
 	appsv1 "k8s.io/api/apps/v1"
 	corev1 "k8s.io/api/core/v1"
 	networkingv1 "k8s.io/api/networking/v1"
@@ -22,7 +27,7 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/apimachinery/pkg/util/intstr"
+	"sigs.k8s.io/yaml"

 	"github.com/cilium/cilium-cli/defaults"
 	"github.com/cilium/cilium-cli/internal/certs"
@@ -31,19 +36,6 @@ import (
 	"github.com/cilium/cilium-cli/status"
 )

-var (
-	agentMaxUnavailable                = intstr.FromInt(2)
-	varTrue                            = true
-	hostToContainer                    = corev1.MountPropagationHostToContainer
-	agentTerminationGracePeriodSeconds = int64(1)
-	hostPathDirectoryOrCreate          = corev1.HostPathDirectoryOrCreate
-	hostPathFileOrCreate               = corev1.HostPathFileOrCreate
-	secretDefaultMode                  = int32(0400)
-	operatorReplicas                   = int32(1)
-	operatorMaxSurge                   = intstr.FromInt(1)
-	operatorMaxUnavailable             = intstr.FromInt(1)
-)
-
 const (
 	ipamKubernetes  = "kubernetes"
 	ipamClusterPool = "cluster-pool"
@@ -57,927 +49,120 @@ const (
 	encryptionWireguard = "wireguard"
 )

-var ciliumClusterRole = &rbacv1.ClusterRole{
-	ObjectMeta: metav1.ObjectMeta{
-		Name: defaults.AgentClusterRoleName,
-	},
-	Rules: []rbacv1.PolicyRule{
-		{
-			APIGroups: []string{"networking.k8s.io"},
-			Resources: []string{"networkpolicies"},
-			Verbs:     []string{"get", "list", "watch"},
-		},
-		{
-			APIGroups: []string{"discovery.k8s.io"},
-			Resources: []string{"endpointslices"},
-			Verbs:     []string{"get", "list", "watch"},
-		},
-		{
-			APIGroups: []string{""},
-			Resources: []string{"namespaces", "services", "endpoints"},
-			Verbs:     []string{"get", "list", "watch"},
-		},
-		{
-			APIGroups: []string{""},
-			Resources: []string{"pods", "pods/finalizers"},
-			Verbs:     []string{"get", "list", "watch", "update", "delete"},
-		},
-		{
-			APIGroups: []string{""},
-			Resources: []string{"nodes"},
-			Verbs:     []string{"get", "list", "watch", "update"},
-		},
-		{
-			APIGroups: []string{""},
-			Resources: []string{"nodes", "nodes/status"},
-			Verbs:     []string{"patch"},
-		},
-		{
-			APIGroups: []string{"apiextensions.k8s.io"},
-			Resources: []string{"customresourcedefinitions"},
-			Verbs:     []string{"list", "watch"},
-		},
-		{
-			APIGroups: []string{"cilium.io"},
-			Resources: []string{
-				"ciliumnetworkpolicies",
-				"ciliumnetworkpolicies/status",
-				"ciliumnetworkpolicies/finalizers",
-				"ciliumclusterwidenetworkpolicies",
-				"ciliumclusterwidenetworkpolicies/status",
-				"ciliumclusterwidenetworkpolicies/finalizers",
-				"ciliumendpoints",
-				"ciliumendpoints/status",
-				"ciliumendpoints/finalizers",
-				"ciliumnodes",
-				"ciliumnodes/status",
-				"ciliumnodes/finalizers",
-				"ciliumidentities",
-				"ciliumidentities/finalizers",
-				"ciliumlocalredirectpolicies",
-				"ciliumlocalredirectpolicies/status",
-				"ciliumlocalredirectpolicies/finalizers",
-				"ciliumegressnatpolicies",
-				"ciliumegressnatpolicies/status",
-				"ciliumegressnatpolicies/finalizers",
-				"ciliumenvoyconfigs",
-				"ciliumenvoyconfigs/status",
-				"ciliumenvoyconfigs/finalizers",
-			},
-			Verbs: []string{"*"},
-		},
-	},
-}
-
-var operatorClusterRole = &rbacv1.ClusterRole{
-	ObjectMeta: metav1.ObjectMeta{
-		Name: defaults.OperatorClusterRoleName,
-	},
-	Rules: []rbacv1.PolicyRule{
-		// to automatically delete [core|kube]dns pods so that are starting to being
-		// managed by Cilium
-		{
-			APIGroups: []string{""},
-			Resources: []string{"pods"},
-			Verbs:     []string{"get", "list", "watch", "delete"},
-		},
-		{
-			APIGroups: []string{"discovery.k8s.io"},
-			Resources: []string{"endpointslices"},
-			Verbs:     []string{"get", "list", "watch"},
-		},
-		{
-			APIGroups: []string{""},
-			Resources: []string{
-				"namespaces", // to check apiserver connectivity
-				"secrets",    // for ingress controller / envoy-config to access TLS certificates
-			},
-			Verbs: []string{"get", "list", "watch"},
-		},
-		{
-			APIGroups: []string{""},
-			Resources: []string{
-				// - to perform the translation of a CNP that contains `ToGroup` to its endpoints
-				// - ingress controller creates / deletes services / endpoints.
-				"services", "endpoints",
-			},
-			Verbs: []string{"get", "list", "watch", "create", "update", "delete"},
-		},
-		{
-
-			APIGroups: []string{""},
-			Resources: []string{"services/status"}, // to perform LB IP allocation for BGP
-			Verbs:     []string{"update"},
-		},
-		{
-			APIGroups: []string{"cilium.io"},
-			Resources: []string{
-				"ciliumnetworkpolicies",
-				"ciliumnetworkpolicies/status",
-				"ciliumnetworkpolicies/finalizers",
-				"ciliumclusterwidenetworkpolicies",
-				"ciliumclusterwidenetworkpolicies/status",
-				"ciliumclusterwidenetworkpolicies/finalizers",
-				"ciliumendpoints",
-				"ciliumendpoints/status",
-				"ciliumendpoints/finalizers",
-				"ciliumnodes",
-				"ciliumnodes/status",
-				"ciliumnodes/finalizers",
-				"ciliumidentities",
-				"ciliumidentities/status",
-				"ciliumidentities/finalizers",
-				"ciliumlocalredirectpolicies",
-				"ciliumlocalredirectpolicies/status",
-				"ciliumlocalredirectpolicies/finalizers",
-				"ciliumegressnatpolicies",
-				"ciliumegressnatpolicies/status",
-				"ciliumegressnatpolicies/finalizers",
-				"ciliumenvoyconfigs",
-				"ciliumenvoyconfigs/status",
-				"ciliumenvoyconfigs/finalizers",
-			},
-			Verbs: []string{"*"},
-		},
-		{
-			APIGroups: []string{"apiextensions.k8s.io"},
-			Resources: []string{"customresourcedefinitions"},
-			Verbs:     []string{"create", "get", "list", "watch", "update"},
-		},
-		// For cilium-operator running in HA mode.
-		//
-		// Cilium operator running in HA mode requires the use of
-		// ResourceLock for Leader Election between multiple running
-		// instances. The preferred way of doing this is to use
-		// LeasesResourceLock as edits to Leases are less common and
-		// fewer objects in the cluster watch "all Leases". The
-		// support for leases was introduced in coordination.k8s.io/v1
-		// during Kubernetes 1.14 release. In Cilium we currently
-		// don't support HA mode for K8s version < 1.14. This condition
-		// make sure that we only authorize access to leases resources
-		// in supported K8s versions.
-		{
-			APIGroups: []string{"coordination.k8s.io"},
-			Resources: []string{"leases"},
-			Verbs:     []string{"create", "get", "update"},
-		},
-		// For ingress controller
-		{
-			APIGroups: []string{"networking.k8s.io"},
-			Resources: []string{"ingresses"},
-			Verbs:     []string{"get", "list", "watch"},
-		},
-		{
-			APIGroups: []string{"networking.k8s.io"},
-			Resources: []string{"ingresses/status"}, // To update ingress status with load balancer IP.
-			Verbs:     []string{"update"},
-		},
-	},
-}
-
 func (k *K8sInstaller) generateAgentDaemonSet() *appsv1.DaemonSet {
-	ds := &appsv1.DaemonSet{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: defaults.AgentDaemonSetName,
-			Labels: map[string]string{
-				"k8s-app": "cilium",
-			},
-		},
-		Spec: appsv1.DaemonSetSpec{
-			Selector: &metav1.LabelSelector{
-				MatchLabels: map[string]string{
-					"k8s-app": "cilium",
-				},
-			},
-			UpdateStrategy: appsv1.DaemonSetUpdateStrategy{
-				Type: appsv1.RollingUpdateDaemonSetStrategyType,
-				RollingUpdate: &appsv1.RollingUpdateDaemonSet{
-					MaxUnavailable: &agentMaxUnavailable,
-				},
-			},
-			Template: corev1.PodTemplateSpec{
-				ObjectMeta: metav1.ObjectMeta{
-					Name: defaults.AgentDaemonSetName,
-					Labels: map[string]string{
-						"k8s-app": "cilium",
-					},
-				},
-				Spec: corev1.PodSpec{
-					Affinity: &corev1.Affinity{
-						PodAntiAffinity: &corev1.PodAntiAffinity{
-							RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{
-								{
-									LabelSelector: &metav1.LabelSelector{
-										MatchExpressions: []metav1.LabelSelectorRequirement{
-											{Key: "k8s-app", Operator: metav1.LabelSelectorOpIn, Values: []string{"cilium"}},
-										},
-									},
-									TopologyKey: "kubernetes.io/hostname",
-								},
-							},
-						},
-					},
-					HostNetwork:                   true,
-					RestartPolicy:                 corev1.RestartPolicyAlways,
-					PriorityClassName:             "system-node-critical",
-					ServiceAccountName:            defaults.AgentServiceAccountName,
-					DeprecatedServiceAccount:      defaults.AgentServiceAccountName, // TODO(tgraf) do we still need this?
-					TerminationGracePeriodSeconds: &agentTerminationGracePeriodSeconds,
-					Tolerations: []corev1.Toleration{
-						{
-							Operator: corev1.TolerationOpExists,
-						},
-					},
-					Containers: []corev1.Container{
-						{
-							Name:            defaults.AgentContainerName,
-							Command:         []string{"cilium-agent"},
-							Args:            []string{"--config-dir=/tmp/cilium/config-map"},
-							Image:           k.fqAgentImage(utils.ImagePathIncludeDigest),
-							ImagePullPolicy: corev1.PullIfNotPresent,
-							LivenessProbe: &corev1.Probe{
-								ProbeHandler: corev1.ProbeHandler{
-									HTTPGet: &corev1.HTTPGetAction{
-										Host:   "127.0.0.1",
-										Path:   "/healthz",
-										Port:   intstr.FromInt(9876),
-										Scheme: corev1.URISchemeHTTP,
-										HTTPHeaders: []corev1.HTTPHeader{
-											{
-												Name:  "brief",
-												Value: "true",
-											},
-										},
-									},
-								},
-								TimeoutSeconds:      int32(5),
-								SuccessThreshold:    int32(1),
-								PeriodSeconds:       int32(30),
-								InitialDelaySeconds: int32(120),
-								FailureThreshold:    int32(10),
-							},
-							ReadinessProbe: &corev1.Probe{
-								ProbeHandler: corev1.ProbeHandler{
-									HTTPGet: &corev1.HTTPGetAction{
-										Host:   "127.0.0.1",
-										Path:   "/healthz",
-										Port:   intstr.FromInt(9876),
-										Scheme: corev1.URISchemeHTTP,
-										HTTPHeaders: []corev1.HTTPHeader{
-											{
-												Name:  "brief",
-												Value: "true",
-											},
-										},
-									},
-								},
-								TimeoutSeconds:      int32(5),
-								SuccessThreshold:    int32(1),
-								PeriodSeconds:       int32(30),
-								InitialDelaySeconds: int32(5),
-								FailureThreshold:    int32(3),
-							},
-							Env: []corev1.EnvVar{
-								{
-									Name: "K8S_NODE_NAME",
-									ValueFrom: &corev1.EnvVarSource{
-										FieldRef: &corev1.ObjectFieldSelector{
-											APIVersion: "v1",
-											FieldPath:  "spec.nodeName",
-										},
-									},
-								},
-								{
-									Name: "CILIUM_K8S_NAMESPACE",
-									ValueFrom: &corev1.EnvVarSource{
-										FieldRef: &corev1.ObjectFieldSelector{
-											APIVersion: "v1",
-											FieldPath:  "metadata.namespace",
-										},
-									},
-								},
-								{
-									Name: "CILIUM_FLANNEL_MASTER_DEVICE",
-									ValueFrom: &corev1.EnvVarSource{
-										ConfigMapKeyRef: &corev1.ConfigMapKeySelector{
-											LocalObjectReference: corev1.LocalObjectReference{
-												Name: "cilium-config",
-											},
-											Key:      "flannel-master-device",
-											Optional: &varTrue,
-										},
-									},
-								},
-								{
-									Name: "CILIUM_FLANNEL_UNINSTALL_ON_EXIT",
-									ValueFrom: &corev1.EnvVarSource{
-
ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - Key: "flannel-uninstall-on-exit", - Optional: &varTrue, - }, - }, - }, - { - Name: "CILIUM_CLUSTERMESH_CONFIG", - Value: "/var/lib/cilium/clustermesh/", - }, - { - Name: "CILIUM_CNI_CHAINING_MODE", - ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - Key: "cni-chaining-mode", - Optional: &varTrue, - }, - }, - }, - { - Name: "CILIUM_CUSTOM_CNI_CONF", - ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - Key: "custom-cni-conf", - Optional: &varTrue, - }, - }, - }, - }, - Lifecycle: &corev1.Lifecycle{ - PostStart: &corev1.LifecycleHandler{ - Exec: &corev1.ExecAction{ - Command: []string{"/cni-install.sh", "--enable-debug=false"}, - }, - }, - PreStop: &corev1.LifecycleHandler{ - Exec: &corev1.ExecAction{ - Command: []string{"/cni-uninstall.sh"}, - }, - }, - }, - SecurityContext: &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN", "SYS_MODULE"}, - }, - Privileged: &varTrue, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "bpf-maps", - MountPath: "/sys/fs/bpf", - }, - { - Name: "cilium-run", - MountPath: "/var/run/cilium", - }, - { - Name: "cni-path", - MountPath: "/host/opt/cni/bin", - }, - { - Name: "etc-cni-netd", - MountPath: "/host/etc/cni/net.d", - }, - { - Name: "clustermesh-secrets", - MountPath: "/var/lib/cilium/clustermesh", - ReadOnly: true, - }, - { - Name: "cilium-config-path", - MountPath: "/tmp/cilium/config-map", - ReadOnly: true, - }, - { - // Needed to be able to load kernel modules - Name: "lib-modules", - MountPath: "/lib/modules", - ReadOnly: true, - }, - { - Name: "xtables-lock", - MountPath: "/run/xtables.lock", - }, - { - Name: "hubble-tls", - MountPath: "/var/lib/cilium/tls/hubble", - ReadOnly: true, - }, - }, - }, - }, - InitContainers: []corev1.Container{ - { - Name: "clean-cilium-state", - Command: []string{"/init-container.sh"}, - Image: k.fqAgentImage(utils.ImagePathIncludeDigest), - ImagePullPolicy: corev1.PullIfNotPresent, - Env: []corev1.EnvVar{ - { - Name: "CILIUM_ALL_STATE", - ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - Key: "clean-cilium-state", - Optional: &varTrue, - }, - }, - }, - { - Name: "CILIUM_BPF_STATE", - ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - Key: "clean-cilium-bpf-state", - Optional: &varTrue, - }, - }, - }, - { - Name: "CILIUM_WAIT_BPF_MOUNT", - ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - Key: "wait-bpf-mount", - Optional: &varTrue, - }, - }, - }, - }, - SecurityContext: &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, - }, - Privileged: &varTrue, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "bpf-maps", - MountPath: "/sys/fs/bpf", - MountPropagation: &hostToContainer, - }, - { - Name: "cilium-run", - MountPath: "/var/run/cilium", - }, - }, - Resources: corev1.ResourceRequirements{ - Requests: 
corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("100Mi"), - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - // To keep state between restarts / upgrades - Name: "cilium-run", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: k.daemonRunPathOnHost(), - Type: &hostPathDirectoryOrCreate, - }, - }, - }, - { - // To keep state between restarts / upgrades for bpf maps - Name: "bpf-maps", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/sys/fs/bpf", - Type: &hostPathDirectoryOrCreate, - }, - }, - }, - { - // To install cilium cni plugin in the host - Name: "cni-path", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: k.cniBinPathOnHost(), - Type: &hostPathDirectoryOrCreate, - }, - }, - }, - { - // To install cilium cni configuration in the host - Name: "etc-cni-netd", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: k.cniConfPathOnHost(), - Type: &hostPathDirectoryOrCreate, - }, - }, - }, - { - // To be able to load kernel modules - Name: "lib-modules", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/lib/modules", - }, - }, - }, - { - // To access iptables concurrently with other processes (e.g. kube-proxy) - Name: "xtables-lock", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/run/xtables.lock", - Type: &hostPathFileOrCreate, - }, - }, - }, - { - // To read the clustermesh configuration - Name: "clustermesh-secrets", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: "cilium-clustermesh", - Optional: &varTrue, - DefaultMode: &secretDefaultMode, - }, - }, - }, - { - // To read the configuration from the config map - Name: "cilium-config-path", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - }, - }, - }, - { - Name: "hubble-tls", - VolumeSource: corev1.VolumeSource{ - Projected: &corev1.ProjectedVolumeSource{ - DefaultMode: &secretDefaultMode, - Sources: []corev1.VolumeProjection{ - { - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: defaults.HubbleServerSecretName, - }, - Items: []corev1.KeyToPath{ - { - Key: corev1.TLSCertKey, - Path: "server.crt", - }, - { - Key: corev1.TLSPrivateKeyKey, - Path: "server.key", - }, - { - Key: defaults.CASecretCertName, - Path: "client-ca.crt", - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - nodeInitContainers := []corev1.Container{} - auxVolumes := []corev1.Volume{} - auxVolumeMounts := []corev1.VolumeMount{} + var ( + v = map[string]interface{}{} + dsFilename, helmOpts string + ) - if k.params.Encryption == encryptionIPsec { - auxVolumes = append(auxVolumes, corev1.Volume{ - Name: "cilium-ipsec-secrets", - VolumeSource: corev1.VolumeSource{ - Secret: &corev1.SecretVolumeSource{ - SecretName: defaults.EncryptionSecretName, - }, - }, - }) - - auxVolumeMounts = append(auxVolumeMounts, corev1.VolumeMount{ - Name: "cilium-ipsec-secrets", - MountPath: "/etc/ipsec", - }) - } + // TODO(aanm) fix kind installation - switch k.flavor.Kind { - case k8s.KindGKE: - nodeInitContainers = append(nodeInitContainers, corev1.Container{ - Name: "wait-for-node-init", - Image: k.fqAgentImage(utils.ImagePathIncludeDigest), - ImagePullPolicy: corev1.PullIfNotPresent, 
- Command: []string{"sh", "-c", `until stat /tmp/cilium-bootstrap/time > /dev/null 2>&1; do echo "Waiting for GKE node-init to run..."; sleep 1; done`}, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "cilium-bootstrap", - MountPath: "/tmp/cilium-bootstrap", - }, - }, - }) + ciliumVer := k.getCiliumVersion() + switch { + case versioncheck.MustCompile(">=1.11.0")(ciliumVer): + dsFilename = "templates/cilium-agent/daemonset.yaml" - auxVolumes = append(auxVolumes, corev1.Volume{ - Name: "cilium-bootstrap", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/tmp/cilium-bootstrap", - Type: &hostPathDirectoryOrCreate, - }, - }, - }) + helmOpts = fmt.Sprintf(`agent=true,operator.enabled=false,serviceAccounts.cilium.name=%s`, defaults.AgentServiceAccountName) + if k.params.Encryption == encryptionIPsec { + helmOpts += `,encryption.enabled,encryption.type=ipsec` + } + switch k.flavor.Kind { + case k8s.KindGKE: + helmOpts += `,nodeinit.enabled,nodeinit.bootstrapFile` + } + if k.bgpEnabled() { + helmOpts += `,bgp.enabled` + } + default: + // TODO(aanm) fix me + panic("unsupported version") } - mountCmd := `mount | grep "/sys/fs/bpf type bpf" || { echo "Mounting eBPF filesystem..."; mount bpffs /sys/fs/bpf -t bpf; }` - nodeInitContainers = append(nodeInitContainers, corev1.Container{ - Name: "ebpf-mount", - Image: k.fqAgentImage(utils.ImagePathIncludeDigest), - ImagePullPolicy: corev1.PullIfNotPresent, - Command: []string{"nsenter", "--mount=/hostproc/1/ns/mnt", "--", "sh", "-c", mountCmd}, - SecurityContext: &corev1.SecurityContext{ - Privileged: &varTrue, - // This doesn't work yet for some reason. It would allow to drop privileged mode:w - // - // Capabilities: &corev1.Capabilities{ - // Add: []corev1.Capability{"SYS_PTRACE", "SYS_ADMIN", "SYS_CHROOT"}, - // }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "host-proc", - MountPath: "/hostproc", - }, - }, - Resources: corev1.ResourceRequirements{ - Requests: corev1.ResourceList{ - corev1.ResourceCPU: resource.MustParse("100m"), - corev1.ResourceMemory: resource.MustParse("100Mi"), - }, - }, - }) - - auxVolumes = append(auxVolumes, corev1.Volume{ - Name: "host-proc", - VolumeSource: corev1.VolumeSource{ - HostPath: &corev1.HostPathVolumeSource{ - Path: "/proc", - Type: &hostPathDirectoryOrCreate, - }, - }, - }) + err := strvals.ParseInto(helmOpts, v) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } - ds.Spec.Template.Spec.InitContainers = append(nodeInitContainers, ds.Spec.Template.Spec.InitContainers...) - ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, auxVolumes...) - ds.Spec.Template.Spec.Containers[0].VolumeMounts = append(ds.Spec.Template.Spec.Containers[0].VolumeMounts, auxVolumeMounts...) 
+ rel, err := k.helmClient.RunWithContext(context.TODO(), k.helmChart, v) + if err != nil { + panic(err) + } - if k.bgpEnabled() { - ds.Spec.Template.Spec.Containers[0].VolumeMounts = append(ds.Spec.Template.Spec.Containers[0].VolumeMounts, - corev1.VolumeMount{ - Name: "bgp-config-path", - ReadOnly: true, - MountPath: "/var/lib/cilium/bgp", - }, - ) - ds.Spec.Template.Spec.Volumes = append(ds.Spec.Template.Spec.Volumes, corev1.Volume{ - Name: "bgp-config-path", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "bgp-config", - }, - }, - }, - }) + dsFile, err := FilterManifests(rel.Manifest, []string{dsFilename}) + if err != nil { + panic(err) } - return ds + var ds appsv1.DaemonSet + err = yaml.Unmarshal([]byte(dsFile[dsFilename]), &ds) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } + return &ds } func (k *K8sInstaller) generateOperatorDeployment() *appsv1.Deployment { - deployment := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{ - Name: defaults.OperatorDeploymentName, - Labels: defaults.OperatorLabels, - }, - Spec: appsv1.DeploymentSpec{ - Replicas: &operatorReplicas, - Selector: &metav1.LabelSelector{ - MatchLabels: defaults.OperatorLabels, - }, - Strategy: appsv1.DeploymentStrategy{ - Type: appsv1.RollingUpdateDeploymentStrategyType, - RollingUpdate: &appsv1.RollingUpdateDeployment{ - MaxUnavailable: &operatorMaxUnavailable, - MaxSurge: &operatorMaxSurge, - }, - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: "cilium-operator", - Labels: defaults.OperatorLabels, - }, - Spec: corev1.PodSpec{ - RestartPolicy: corev1.RestartPolicyAlways, - PriorityClassName: "system-cluster-critical", - ServiceAccountName: defaults.OperatorServiceAccountName, - DeprecatedServiceAccount: defaults.OperatorServiceAccountName, // TODO(tgraf) do we still need this? 
- TerminationGracePeriodSeconds: &agentTerminationGracePeriodSeconds, - HostNetwork: true, - Tolerations: []corev1.Toleration{ - { - Operator: corev1.TolerationOpExists, - }, - }, - Affinity: &corev1.Affinity{ - PodAffinity: &corev1.PodAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{ - { - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - {Key: "io.cilium/app", Operator: metav1.LabelSelectorOpIn, Values: []string{"operator"}}, - }, - }, - TopologyKey: "kubernetes.io/hostname", - }, - }, - }, - }, - Containers: []corev1.Container{ - { - Name: "cilium-operator", - Command: k.operatorCommand(), - Args: []string{"--config-dir=/tmp/cilium/config-map"}, - Image: k.fqOperatorImage(utils.ImagePathIncludeDigest), - ImagePullPolicy: corev1.PullIfNotPresent, - Env: []corev1.EnvVar{ - { - Name: "K8S_NODE_NAME", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "spec.nodeName", - }, - }, - }, - { - Name: "CILIUM_K8S_NAMESPACE", - ValueFrom: &corev1.EnvVarSource{ - FieldRef: &corev1.ObjectFieldSelector{ - APIVersion: "v1", - FieldPath: "metadata.namespace", - }, - }, - }, - { - Name: "CILIUM_DEBUG", - ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - Key: "debug", - Optional: &varTrue, - }, - }, - }, - }, - VolumeMounts: []corev1.VolumeMount{ - { - Name: "cilium-config-path", - MountPath: "/tmp/cilium/config-map", - ReadOnly: true, - }, - }, - }, - }, - Volumes: []corev1.Volume{ - { - // To read the configuration from the config map - Name: "cilium-config-path", - VolumeSource: corev1.VolumeSource{ - ConfigMap: &corev1.ConfigMapVolumeSource{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-config", - }, - }, - }, - }, - }, - }, - }, - }, - } + var ( + v = map[string]interface{}{} + deployFilename, helmOpts string + ) - switch k.params.DatapathMode { - case DatapathAwsENI: - c := &deployment.Spec.Template.Spec.Containers[0] - c.Env = append(c.Env, corev1.EnvVar{ - Name: "AWS_ACCESS_KEY_ID", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-aws", - }, - Key: "AWS_ACCESS_KEY_ID", - Optional: &varTrue, - }, - }, - }) + ciliumVer := k.getCiliumVersion() + switch { + case versioncheck.MustCompile(">=1.11.0")(ciliumVer): + deployFilename = "templates/cilium-operator/deployment.yaml" - c.Env = append(c.Env, corev1.EnvVar{ - Name: "AWS_SECRET_ACCESS_KEY", - ValueFrom: &corev1.EnvVarSource{ - SecretKeyRef: &corev1.SecretKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "cilium-aws", - }, - Key: "AWS_SECRET_ACCESS_KEY", - Optional: &varTrue, - }, - }, - }) + // TODO(aanm) to keep the previous behavior unchanged we will set the number + // of the operator replicas to 1. 
+	// Ideally this should be the default in the helm chart.
+	helmOpts += "operator.replicas=1"
+	helmOpts += fmt.Sprintf(`,agent=false,operator.enabled=true,serviceAccounts.operator.name=%s`, defaults.OperatorServiceAccountName)

-		c.Env = append(c.Env, corev1.EnvVar{
-			Name: "AWS_DEFAULT_REGION",
-			ValueFrom: &corev1.EnvVarSource{
-				SecretKeyRef: &corev1.SecretKeySelector{
-					LocalObjectReference: corev1.LocalObjectReference{
-						Name: "cilium-aws",
-					},
-					Key:      "AWS_DEFAULT_REGION",
-					Optional: &varTrue,
-				},
-			},
-		})
-
-	case DatapathAzure:
-		c := &deployment.Spec.Template.Spec.Containers[0]
-		c.Env = append(c.Env, corev1.EnvVar{
-			Name:  "AZURE_SUBSCRIPTION_ID",
-			Value: k.params.Azure.SubscriptionID,
-		})
+		switch k.params.DatapathMode {
+		case DatapathAwsENI:
+			helmOpts += ",eni.enabled=true"
+		case DatapathAzure:
+			helmOpts += ",azure.enabled=true"
+			helmOpts += fmt.Sprintf(",azure.subscriptionID=%s", k.params.Azure.SubscriptionID)
+			helmOpts += fmt.Sprintf(",azure.tenantID=%s", k.params.Azure.TenantID)
+			helmOpts += fmt.Sprintf(",azure.resourceGroup=%s", k.params.Azure.AKSNodeResourceGroup)
+		}

-		c.Env = append(c.Env, corev1.EnvVar{
-			Name:  "AZURE_TENANT_ID",
-			Value: k.params.Azure.TenantID,
-		})
+		if k.bgpEnabled() {
+			helmOpts += ",bgp.enabled=true"
+		}

-		c.Env = append(c.Env, corev1.EnvVar{
-			Name:  "AZURE_RESOURCE_GROUP",
-			Value: k.params.Azure.AKSNodeResourceGroup,
-		})
+	default:
+		// TODO(aanm) fix me
+		panic("unsupported version")
+	}

-		c.Env = append(c.Env, corev1.EnvVar{
-			Name:  "AZURE_CLIENT_ID",
-			Value: k.params.Azure.ClientID,
-		})
+	err := strvals.ParseInto(helmOpts, v)
+	if err != nil {
+		// Developer mistake, this shouldn't happen
+		panic(err)
+	}

-		c.Env = append(c.Env, corev1.EnvVar{
-			Name:  "AZURE_CLIENT_SECRET",
-			Value: k.params.Azure.ClientSecret,
-		})
+	rel, err := k.helmClient.RunWithContext(context.TODO(), k.helmChart, v)
+	if err != nil {
+		panic(err)
+	}

-	if k.bgpEnabled() {
-		deployment.Spec.Template.Spec.Containers[0].VolumeMounts = append(deployment.Spec.Template.Spec.Containers[0].VolumeMounts,
-			corev1.VolumeMount{
-				Name:      "bgp-config-path",
-				ReadOnly:  true,
-				MountPath: "/var/lib/cilium/bgp",
-			},
-		)
-		deployment.Spec.Template.Spec.Volumes = append(deployment.Spec.Template.Spec.Volumes, corev1.Volume{
-			Name: "bgp-config-path",
-			VolumeSource: corev1.VolumeSource{
-				ConfigMap: &corev1.ConfigMapVolumeSource{
-					LocalObjectReference: corev1.LocalObjectReference{
-						Name: "bgp-config",
-					},
-				},
-			},
-		})
+	deployFile, err := FilterManifests(rel.Manifest, []string{deployFilename})
+	if err != nil {
+		panic(err)
+	}

-	return deployment
+	var deploy appsv1.Deployment
+	err = yaml.Unmarshal([]byte(deployFile[deployFilename]), &deploy)
+	if err != nil {
+		// Developer mistake, this shouldn't happen
+		panic(err)
+	}
+	return &deploy
 }

 type k8sInstallerImplementation interface {
@@ -1036,6 +221,8 @@ type K8sInstaller struct {
 	flavor        k8s.Flavor
 	certManager   *certs.CertManager
 	rollbackSteps []rollbackStep
+	helmChart     *chart.Chart
+	helmClient    *action.Install
 }

 const (
@@ -1112,35 +299,6 @@ func (p *Parameters) validate() error {
 	return nil
 }

-func (k *K8sInstaller) cniBinPathOnHost() string {
-	switch k.flavor.Kind {
-	case k8s.KindGKE:
-		return "/home/kubernetes/bin"
-	case k8s.KindMicrok8s:
-		return Microk8sSnapPath + "/opt/cni/bin"
-	}
-
-	return "/opt/cni/bin"
-}
-
-func (k *K8sInstaller) cniConfPathOnHost() string {
-	switch k.flavor.Kind {
-	case k8s.KindMicrok8s:
-		return Microk8sSnapPath + "/args/cni-network"
-	}
-
-	return "/etc/cni/net.d"
-}
-
-func (k *K8sInstaller) daemonRunPathOnHost() string {
-	switch k.flavor.Kind {
-	case k8s.KindMicrok8s:
-		return Microk8sSnapPath + "/var/run/cilium"
-	}
-
-	return "/var/run/cilium"
-}
-
 func (k *K8sInstaller) fqAgentImage(imagePathMode utils.ImagePathMode) string {
 	return utils.BuildImagePath(k.params.AgentImage, k.params.Version, defaults.AgentImage, defaults.Version, imagePathMode)
 }
@@ -1157,15 +315,27 @@ func (k *K8sInstaller) fqOperatorImage(imagePathMode utils.ImagePathMode) string
 	return utils.BuildImagePath(k.params.OperatorImage, k.params.Version, defaultImage, defaults.Version, imagePathMode)
 }

-func (k *K8sInstaller) operatorCommand() []string {
-	switch k.params.DatapathMode {
-	case DatapathAwsENI:
-		return []string{"cilium-operator-aws"}
-	case DatapathAzure:
-		return []string{"cilium-operator-azure"}
+func newHelmClientChart(ciliumVersion, namespace, k8sVersion string) (*action.Install, *chart.Chart, error) {
+	actionConfig := new(action.Configuration)
+	helmClient := action.NewInstall(actionConfig)
+	helmClient.DryRun = true
+	helmClient.ReleaseName = "release-name"
+	helmClient.Replace = true // Skip the name check
+	helmClient.ClientOnly = true
+	helmClient.APIVersions = []string{k8sVersion}
+	helmClient.Namespace = namespace
+
+	helmTgz, err := ciliumhelm.HelmFS.ReadFile(fmt.Sprintf("cilium-%s.tgz", ciliumVersion))
+	if err != nil {
+		return nil, nil, fmt.Errorf("cilium version not found: %s", err)
 	}
-	return []string{"cilium-operator-generic"}
+	// Check chart dependencies to make sure all are present in /charts
+	helmChart, err := loader.LoadArchive(bytes.NewReader(helmTgz))
+	if err != nil {
+		return nil, nil, fmt.Errorf("unable to read cilium helm chart: %s", err)
+	}
+	return helmClient, helmChart, nil
 }

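Taken together, the flow this PR introduces is: load the chart embedded in `github.com/cilium/charts`, render it client-side with the values parsed from the options string, filter one template out of the combined manifest stream, and unmarshal it into a typed object. Below is a minimal sketch of that round trip written as a test in this package; the chart version, namespace, Kubernetes version, and template path are illustrative assumptions, not values mandated by this change:

```go
package install

import (
	"context"
	"testing"

	"helm.sh/helm/v3/pkg/strvals"
	appsv1 "k8s.io/api/apps/v1"
	"sigs.k8s.io/yaml"
)

func TestRenderAgentDaemonSet(t *testing.T) {
	// Hypothetical inputs: an embedded chart version and target versions.
	helmClient, helmChart, err := newHelmClientChart("1.11.1", "kube-system", "v1.22.0")
	if err != nil {
		t.Fatal(err)
	}

	vals := map[string]interface{}{}
	if err := strvals.ParseInto("agent=true,operator.enabled=false", vals); err != nil {
		t.Fatal(err)
	}

	// DryRun + ClientOnly means no cluster access is needed for the render.
	rel, err := helmClient.RunWithContext(context.TODO(), helmChart, vals)
	if err != nil {
		t.Fatal(err)
	}

	// Pull a single template out of the rendered release.
	const dsFilename = "templates/cilium-agent/daemonset.yaml"
	files, err := FilterManifests(rel.Manifest, []string{dsFilename})
	if err != nil {
		t.Fatal(err)
	}

	var ds appsv1.DaemonSet
	if err := yaml.Unmarshal([]byte(files[dsFilename]), &ds); err != nil {
		t.Fatal(err)
	}
	if ds.Name == "" {
		t.Error("rendered DaemonSet has no name")
	}
}
```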
"/args/cni-network" - } - - return "/etc/cni/net.d" -} - -func (k *K8sInstaller) daemonRunPathOnHost() string { - switch k.flavor.Kind { - case k8s.KindMicrok8s: - return Microk8sSnapPath + "/var/run/cilium" - } - - return "/var/run/cilium" -} - func (k *K8sInstaller) fqAgentImage(imagePathMode utils.ImagePathMode) string { return utils.BuildImagePath(k.params.AgentImage, k.params.Version, defaults.AgentImage, defaults.Version, imagePathMode) } @@ -1157,15 +315,27 @@ func (k *K8sInstaller) fqOperatorImage(imagePathMode utils.ImagePathMode) string return utils.BuildImagePath(k.params.OperatorImage, k.params.Version, defaultImage, defaults.Version, imagePathMode) } -func (k *K8sInstaller) operatorCommand() []string { - switch k.params.DatapathMode { - case DatapathAwsENI: - return []string{"cilium-operator-aws"} - case DatapathAzure: - return []string{"cilium-operator-azure"} +func newHelmClientChart(ciliumVersion, namespace, k8sVersion string) (*action.Install, *chart.Chart, error) { + actionConfig := new(action.Configuration) + helmClient := action.NewInstall(actionConfig) + helmClient.DryRun = true + helmClient.ReleaseName = "release-name" + helmClient.Replace = true // Skip the name check + helmClient.ClientOnly = true + helmClient.APIVersions = []string{k8sVersion} + helmClient.Namespace = namespace + + helmTgz, err := ciliumhelm.HelmFS.ReadFile(fmt.Sprintf("cilium-%s.tgz", ciliumVersion)) + if err != nil { + return nil, nil, fmt.Errorf("cilium version not found: %s", err) } - return []string{"cilium-operator-generic"} + // Check chart dependencies to make sure all are present in /charts + helmChart, err := loader.LoadArchive(bytes.NewReader(helmTgz)) + if err != nil { + return nil, nil, fmt.Errorf("unable to read cilium helm chart: %s", err) + } + return helmClient, helmChart, nil } func NewK8sInstaller(client k8sInstallerImplementation, p Parameters) (*K8sInstaller, error) { @@ -1175,10 +345,19 @@ func NewK8sInstaller(client k8sInstallerImplementation, p Parameters) (*K8sInsta cm := certs.NewCertManager(client, certs.Parameters{Namespace: p.Namespace}) + // TODO(aanm) get k8s version from somewhere else + ersion := strings.TrimPrefix(p.Version, "v") + helmClient, helmChart, err := newHelmClientChart(ersion, p.Namespace, "v1.22.0") + if err != nil { + return nil, err + } + return &K8sInstaller{ client: client, params: p, certManager: cm, + helmClient: helmClient, + helmChart: helmChart, }, nil } @@ -1383,11 +562,6 @@ func (k *K8sInstaller) generateConfigMap() (*corev1.ConfigMap, error) { m.Data["enable-bpf-masquerade"] = "false" } - // Put the init script in place (if any). 
- if initScript, exists := nodeInitScript[k.flavor.Kind]; exists { - m.Data[nodeInitScriptConfigMapKey(k.flavor.Kind)] = strings.ReplaceAll(initScript, "{{ .Values.cni.binPath }}", k.cniBinPathOnHost()) - } - switch k.params.Encryption { case encryptionIPsec: m.Data["enable-ipsec"] = "true" @@ -1601,6 +775,10 @@ func (k *K8sInstaller) Install(ctx context.Context) error { if err := k.aksSetup(ctx); err != nil { return err } + + if err := k.createAKSSecrets(ctx); err != nil { + return err + } } if err := k.installCerts(ctx); err != nil { @@ -1608,7 +786,7 @@ func (k *K8sInstaller) Install(ctx context.Context) error { } k.Log("🚀 Creating Service accounts...") - if _, err := k.client.CreateServiceAccount(ctx, k.params.Namespace, k8s.NewServiceAccount(defaults.AgentServiceAccountName), metav1.CreateOptions{}); err != nil { + if _, err := k.client.CreateServiceAccount(ctx, k.params.Namespace, k.NewServiceAccount(defaults.AgentServiceAccountName), metav1.CreateOptions{}); err != nil { return err } k.pushRollbackStep(func(ctx context.Context) { @@ -1617,7 +795,7 @@ func (k *K8sInstaller) Install(ctx context.Context) error { } }) - if _, err := k.client.CreateServiceAccount(ctx, k.params.Namespace, k8s.NewServiceAccount(defaults.OperatorServiceAccountName), metav1.CreateOptions{}); err != nil { + if _, err := k.client.CreateServiceAccount(ctx, k.params.Namespace, k.NewServiceAccount(defaults.OperatorServiceAccountName), metav1.CreateOptions{}); err != nil { return err } k.pushRollbackStep(func(ctx context.Context) { @@ -1627,7 +805,7 @@ func (k *K8sInstaller) Install(ctx context.Context) error { }) k.Log("🚀 Creating Cluster roles...") - if _, err := k.client.CreateClusterRole(ctx, ciliumClusterRole, metav1.CreateOptions{}); err != nil { + if _, err := k.client.CreateClusterRole(ctx, k.NewClusterRole(defaults.AgentClusterRoleName), metav1.CreateOptions{}); err != nil { return err } k.pushRollbackStep(func(ctx context.Context) { @@ -1636,7 +814,7 @@ func (k *K8sInstaller) Install(ctx context.Context) error { } }) - if _, err := k.client.CreateClusterRoleBinding(ctx, k8s.NewClusterRoleBinding(defaults.AgentClusterRoleName, k.params.Namespace, defaults.AgentServiceAccountName), metav1.CreateOptions{}); err != nil { + if _, err := k.client.CreateClusterRoleBinding(ctx, k.NewClusterRoleBinding(defaults.AgentClusterRoleName, defaults.AgentServiceAccountName), metav1.CreateOptions{}); err != nil { return err } k.pushRollbackStep(func(ctx context.Context) { @@ -1645,7 +823,7 @@ func (k *K8sInstaller) Install(ctx context.Context) error { } }) - if _, err := k.client.CreateClusterRole(ctx, operatorClusterRole, metav1.CreateOptions{}); err != nil { + if _, err := k.client.CreateClusterRole(ctx, k.NewClusterRole(defaults.OperatorClusterRoleName), metav1.CreateOptions{}); err != nil { return err } k.pushRollbackStep(func(ctx context.Context) { @@ -1654,7 +832,7 @@ func (k *K8sInstaller) Install(ctx context.Context) error { } }) - if _, err := k.client.CreateClusterRoleBinding(ctx, k8s.NewClusterRoleBinding(defaults.OperatorClusterRoleName, k.params.Namespace, defaults.OperatorServiceAccountName), metav1.CreateOptions{}); err != nil { + if _, err := k.client.CreateClusterRoleBinding(ctx, k.NewClusterRoleBinding(defaults.OperatorClusterRoleName, defaults.OperatorServiceAccountName), metav1.CreateOptions{}); err != nil { return err } k.pushRollbackStep(func(ctx context.Context) { @@ -1664,6 +842,7 @@ func (k *K8sInstaller) Install(ctx context.Context) error { }) if k.params.Encryption == encryptionIPsec { + // 
TODO(aanm) automate this as well in form of helm chart if err := k.createEncryptionSecret(ctx); err != nil { return err } @@ -1678,6 +857,7 @@ func (k *K8sInstaller) Install(ctx context.Context) error { } }) + // TODO(aanm) automate this as well in form of helm chart configMap, err := k.generateConfigMap() if err != nil { return fmt.Errorf("cannot generate ConfigMap: %w", err) diff --git a/install/node_init.go b/install/node_init.go index 9e6c59330a..afb230a272 100644 --- a/install/node_init.go +++ b/install/node_init.go @@ -4,199 +4,53 @@ package install import ( - "strings" + "context" + "github.com/cilium/cilium/pkg/versioncheck" + "helm.sh/helm/v3/pkg/strvals" appsv1 "k8s.io/api/apps/v1" - corev1 "k8s.io/api/core/v1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/yaml" - "github.com/cilium/cilium-cli/defaults" "github.com/cilium/cilium-cli/internal/k8s" ) var ( nodeInitScript = map[k8s.Kind]string{ - k8s.KindEKS: bash(` -# When running in AWS ENI mode, it's likely that 'aws-node' has -# had a chance to install SNAT iptables rules. These can result -# in dropped traffic, so we should attempt to remove them. -# We do it using a 'postStart' hook since this may need to run -# for nodes which might have already been init'ed but may still -# have dangling rules. This is safe because there are no -# dependencies on anything that is part of the startup script -# itself, and can be safely run multiple times per node (e.g. in -# case of a restart). -if [[ "$(iptables-save | grep -c AWS-SNAT-CHAIN)" != "0" ]]; -then - echo 'Deleting iptables rules created by the AWS CNI VPC plugin' - iptables-save | grep -v AWS-SNAT-CHAIN | iptables-restore -fi -echo 'Done!' -`), - k8s.KindGKE: bash(` -# Check if we're running on a GKE containerd flavor. -GKE_KUBERNETES_BIN_DIR="/home/kubernetes/bin" -if [[ -f "${GKE_KUBERNETES_BIN_DIR}/gke" ]] && command -v containerd &>/dev/null; then - echo "GKE *_containerd flavor detected..." - - # (GKE *_containerd) Upon node restarts, GKE's containerd images seem to reset - # the /etc directory and our changes to the kubelet and Cilium's CNI - # configuration are removed. This leaves room for containerd and its CNI to - # take over pods previously managed by Cilium, causing Cilium to lose - # ownership over these pods. We rely on the empirical observation that - # /home/kubernetes/bin/kubelet is not changed across node reboots, and replace - # it with a wrapper script that performs some initialization steps when - # required and then hands over control to the real kubelet. - - # Only create the kubelet wrapper if we haven't previously done so. - if [[ ! -f "${GKE_KUBERNETES_BIN_DIR}/the-kubelet" ]]; - then - echo "Installing the kubelet wrapper..." - - # Rename the real kubelet. - mv "${GKE_KUBERNETES_BIN_DIR}/kubelet" "${GKE_KUBERNETES_BIN_DIR}/the-kubelet" - - # Initialize the kubelet wrapper which lives in the place of the real kubelet. - touch "${GKE_KUBERNETES_BIN_DIR}/kubelet" - chmod a+x "${GKE_KUBERNETES_BIN_DIR}/kubelet" - - # Populate the kubelet wrapper. It will perform the initialization steps we - # need and then become the kubelet. - cat <<'EOF' | tee "${GKE_KUBERNETES_BIN_DIR}/kubelet" -#!/bin/bash -set -euo pipefail -CNI_CONF_DIR="/etc/cni/net.d" -CONTAINERD_CONFIG="/etc/containerd/config.toml" -# Only stop and start containerd if the Cilium CNI configuration does not exist, -# or if the 'conf_template' property is present in the containerd config file, -# in order to avoid unnecessarily restarting containerd. 
-if [[ -z "$(find "${CNI_CONF_DIR}" -type f -name '*cilium*')" || \ - "$(grep -cE '^\s+conf_template' "${CONTAINERD_CONFIG}")" != "0" ]]; -then - # Stop containerd as it starts by creating a CNI configuration from a template - # causing pods to start with IPs assigned by GKE's CNI. - # 'disable --now' is used instead of stop as this script runs concurrently - # with containerd on node startup, and hence containerd might not have been - # started yet, in which case 'disable' prevents it from starting. - echo "Disabling and stopping containerd" - systemctl disable --now containerd - # Remove any pre-existing files in the CNI configuration directory. We skip - # any possibly existing Cilium configuration file for the obvious reasons. - echo "Removing undesired CNI configuration files" - find "${CNI_CONF_DIR}" -type f -not -name '*cilium*' -exec rm {} \; - # As mentioned above, the containerd configuration needs a little tweak in - # order not to create the default CNI configuration, so we update its config. - echo "Fixing containerd configuration" - sed -Ei 's/^(\s+conf_template)/\#\1/g' "${CONTAINERD_CONFIG}" - # Start containerd. It won't create it's CNI configuration file anymore. - echo "Enabling and starting containerd" - systemctl enable --now containerd -fi -# Become the real kubelet, and pass it some additionally required flags (and -# place these last so they have precedence). -exec /home/kubernetes/bin/the-kubelet "${@}" --network-plugin=cni --cni-bin-dir={{ .Values.cni.binPath }} -EOF - else - echo "Kubelet wrapper already exists, skipping..." - fi -else - # (Generic) Alter the kubelet configuration to run in CNI mode - echo "Changing kubelet configuration to --network-plugin=cni --cni-bin-dir={{ .Values.cni.binPath }}" - mkdir -p {{ .Values.cni.binPath }} - sed -i "s:--network-plugin=kubenet:--network-plugin=cni\ --cni-bin-dir={{ .Values.cni.binPath }}:g" /etc/default/kubelet -fi -echo "Restarting the kubelet..." -systemctl restart kubelet - -iptables -w -t nat -D POSTROUTING -m comment --comment "ip-masq: ensure nat POSTROUTING directs all non-LOCAL destination traffic to our custom IP-MASQ chain" -m addrtype ! 
--dst-type LOCAL -j IP-MASQ || true - -mkdir -p /tmp/cilium-bootstrap -date > /tmp/cilium-bootstrap/time -`)} + k8s.KindEKS: "", + k8s.KindGKE: "", + } ) -func nodeInitLabels(k k8s.Kind) map[string]string { - return map[string]string{ - "k8s-app": nodeInitName(k), +func (k *K8sInstaller) generateNodeInitDaemonSet(_ k8s.Kind) *appsv1.DaemonSet { + var ( + v = map[string]interface{}{} + helmOpts string + ) + + ciliumVer := k.getCiliumVersion() + switch { + case versioncheck.MustCompile(">=1.11.0")(ciliumVer): + helmOpts = `nodeinit.enabled=true,agent=false,operator.enabled=false` + default: + // TODO(aanm) fix me + panic("unsupported version") } -} -func nodeInitName(k k8s.Kind) string { - return "cilium-" + strings.ToLower(k.String()) + "-node-init" -} - -func nodeInitScriptConfigMapKey(k k8s.Kind) string { - return strings.ToLower(k.String()) + "-node-init-script" -} - -func (k *K8sInstaller) generateNodeInitDaemonSet(kind k8s.Kind) *appsv1.DaemonSet { - ds := &appsv1.DaemonSet{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeInitName(kind), - Labels: nodeInitLabels(kind), - }, - Spec: appsv1.DaemonSetSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: nodeInitLabels(kind), - }, - Template: corev1.PodTemplateSpec{ - ObjectMeta: metav1.ObjectMeta{ - Name: nodeInitName(kind), - Labels: nodeInitLabels(kind), - }, - Spec: corev1.PodSpec{ - HostNetwork: true, - HostPID: true, - RestartPolicy: corev1.RestartPolicyAlways, - PriorityClassName: "system-node-critical", - ServiceAccountName: defaults.AgentServiceAccountName, - TerminationGracePeriodSeconds: &agentTerminationGracePeriodSeconds, - Tolerations: []corev1.Toleration{ - {Operator: corev1.TolerationOpExists}, - }, - Containers: []corev1.Container{ - { - Name: "node-init", - Image: "quay.io/cilium/startup-script:62bfbe88c17778aad7bef9fa57ff9e2d4a9ba0d8", - ImagePullPolicy: corev1.PullIfNotPresent, - SecurityContext: &corev1.SecurityContext{ - Capabilities: &corev1.Capabilities{ - Add: []corev1.Capability{"NET_ADMIN"}, - }, - Privileged: &varTrue, - }, - Env: []corev1.EnvVar{ - { - Name: "CHECKPOINT_PATH", - Value: "/tmp/node-init.cilium.io", - }, - { - Name: "STARTUP_SCRIPT", - ValueFrom: &corev1.EnvVarSource{ - ConfigMapKeyRef: &corev1.ConfigMapKeySelector{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: defaults.ConfigMapName, - }, - Key: nodeInitScriptConfigMapKey(kind), - }, - }, - }, - }, - }, - }, - }, - }, - }, + err := strvals.ParseInto(helmOpts, v) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } + rel, err := k.helmClient.RunWithContext(context.TODO(), k.helmChart, v) + if err != nil { + panic(err) } - return ds -} - -func bash(v string) string { - return `#!/bin/bash -set -o errexit -set -o pipefail -set -o nounset - -` + v + var ds appsv1.DaemonSet + err = yaml.Unmarshal([]byte(rel.Manifest), &ds) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } + return &ds } diff --git a/install/rbac.go b/install/rbac.go new file mode 100644 index 0000000000..a960837e44 --- /dev/null +++ b/install/rbac.go @@ -0,0 +1,158 @@ +// SPDX-License-Identifier: Apache-2.0 +// Copyright 2021 Authors of Cilium + +package install + +import ( + "context" + "fmt" + + "github.com/cilium/cilium/pkg/versioncheck" + "helm.sh/helm/v3/pkg/strvals" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "sigs.k8s.io/yaml" + + "github.com/cilium/cilium-cli/defaults" +) + +func (k *K8sInstaller) NewServiceAccount(name string) *corev1.ServiceAccount { + var ( + v = 
map[string]interface{}{} + saFileName, helmOpts string + ) + + ciliumVer := k.getCiliumVersion() + switch { + case versioncheck.MustCompile(">=1.11.0")(ciliumVer): + switch name { + case defaults.AgentServiceAccountName: + helmOpts = fmt.Sprintf(`serviceAccounts.cilium.name=%s`, name) + saFileName = "templates/cilium-agent/serviceaccount.yaml" + case defaults.OperatorServiceAccountName: + helmOpts = fmt.Sprintf(`serviceAccounts.operator.name=%s`, name) + saFileName = "templates/cilium-operator/serviceaccount.yaml" + } + default: + // TODO(aanm) fix me + panic("unsupported version") + } + + err := strvals.ParseInto(helmOpts, v) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } + + rel, err := k.helmClient.RunWithContext(context.TODO(), k.helmChart, v) + if err != nil { + panic(err) + } + + saFile, err := FilterManifests(rel.Manifest, []string{saFileName}) + if err != nil { + panic(err) + } + + var sa corev1.ServiceAccount + err = yaml.Unmarshal([]byte(saFile[saFileName]), &sa) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } + return &sa +} + +func (k *K8sInstaller) NewClusterRole(name string) *rbacv1.ClusterRole { + var ( + v = map[string]interface{}{} + crFileName, helmOpts string + ) + + ciliumVer := k.getCiliumVersion() + switch { + case versioncheck.MustCompile(">=1.11.0")(ciliumVer): + switch name { + case defaults.AgentServiceAccountName: + helmOpts = fmt.Sprintf(`serviceAccounts.cilium.name=%s`, name) + crFileName = "templates/cilium-agent/clusterrole.yaml" + case defaults.OperatorServiceAccountName: + helmOpts = fmt.Sprintf(`serviceAccounts.operator.name=%s`, name) + crFileName = "templates/cilium-operator/clusterrole.yaml" + } + default: + // TODO(aanm) fix me + panic("unsupported version") + } + + err := strvals.ParseInto(helmOpts, v) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } + + rel, err := k.helmClient.RunWithContext(context.TODO(), k.helmChart, v) + if err != nil { + panic(err) + } + + crFile, err := FilterManifests(rel.Manifest, []string{crFileName}) + if err != nil { + panic(err) + } + + var cr rbacv1.ClusterRole + err = yaml.Unmarshal([]byte(crFile[crFileName]), &cr) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } + return &cr +} + +func (k *K8sInstaller) NewClusterRoleBinding(crbName, saName string) *rbacv1.ClusterRoleBinding { + var ( + v = map[string]interface{}{} + crbFileName, helmOpts string + ) + + ciliumVer := k.getCiliumVersion() + switch { + case versioncheck.MustCompile(">=1.11.0")(ciliumVer): + switch crbName { + case defaults.AgentClusterRoleName: + helmOpts = fmt.Sprintf(`serviceAccounts.cilium.name=%s`, saName) + crbFileName = "templates/cilium-agent/clusterrolebinding.yaml" + case defaults.OperatorClusterRoleName: + helmOpts = fmt.Sprintf(`serviceAccounts.operator.name=%s`, saName) + crbFileName = "templates/cilium-operator/clusterrolebinding.yaml" + } + default: + // TODO(aanm) fix me + panic("unsupported version") + } + + err := strvals.ParseInto(helmOpts, v) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) + } + + rel, err := k.helmClient.RunWithContext(context.TODO(), k.helmChart, v) + if err != nil { + panic(err) + } + + crbFile, err := FilterManifests(rel.Manifest, []string{crbFileName}) + if err != nil { + panic(err) + } + + var crb rbacv1.ClusterRoleBinding + err = yaml.Unmarshal([]byte(crbFile[crbFileName]), &crb) + if err != nil { + // Developer mistake, this shouldn't happen + panic(err) 
+ } + return &crb +} diff --git a/install/uninstall.go b/install/uninstall.go index c448823aa0..33c0750902 100644 --- a/install/uninstall.go +++ b/install/uninstall.go @@ -90,7 +90,7 @@ func (k *K8sUninstaller) Uninstall(ctx context.Context) error { if _, exists := nodeInitScript[k.flavor.Kind]; exists { k.Log("🔥 Deleting node init daemonset...") - k.client.DeleteDaemonSet(ctx, k.params.Namespace, nodeInitName(k.flavor.Kind), metav1.DeleteOptions{}) + k.client.DeleteDaemonSet(ctx, k.params.Namespace, defaults.NodeInitDaemonSetName, metav1.DeleteOptions{}) } if k.params.Wait { diff --git a/vendor/github.com/Azure/go-ansiterm/LICENSE b/vendor/github.com/Azure/go-ansiterm/LICENSE new file mode 100644 index 0000000000..e3d9a64d1d --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Microsoft Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Azure/go-ansiterm/README.md b/vendor/github.com/Azure/go-ansiterm/README.md new file mode 100644 index 0000000000..261c041e7a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/README.md @@ -0,0 +1,12 @@ +# go-ansiterm + +This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. + +For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. + +The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). + +See parser_test.go for examples exercising the state machine and generating appropriate function calls. + +----- +This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/). 
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. diff --git a/vendor/github.com/Azure/go-ansiterm/constants.go b/vendor/github.com/Azure/go-ansiterm/constants.go new file mode 100644 index 0000000000..96504a33bc --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/constants.go @@ -0,0 +1,188 @@ +package ansiterm + +const LogEnv = "DEBUG_TERMINAL" + +// ANSI constants +// References: +// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm +// -- http://man7.org/linux/man-pages/man4/console_codes.4.html +// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html +// -- http://en.wikipedia.org/wiki/ANSI_escape_code +// -- http://vt100.net/emu/dec_ansi_parser +// -- http://vt100.net/emu/vt500_parser.svg +// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html +// -- http://www.inwap.com/pdp10/ansicode.txt +const ( + // ECMA-48 Set Graphics Rendition + // Note: + // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved + // -- Fonts could possibly be supported via SetCurrentConsoleFontEx + // -- Windows does not expose the per-window cursor (i.e., caret) blink times + ANSI_SGR_RESET = 0 + ANSI_SGR_BOLD = 1 + ANSI_SGR_DIM = 2 + _ANSI_SGR_ITALIC = 3 + ANSI_SGR_UNDERLINE = 4 + _ANSI_SGR_BLINKSLOW = 5 + _ANSI_SGR_BLINKFAST = 6 + ANSI_SGR_REVERSE = 7 + _ANSI_SGR_INVISIBLE = 8 + _ANSI_SGR_LINETHROUGH = 9 + _ANSI_SGR_FONT_00 = 10 + _ANSI_SGR_FONT_01 = 11 + _ANSI_SGR_FONT_02 = 12 + _ANSI_SGR_FONT_03 = 13 + _ANSI_SGR_FONT_04 = 14 + _ANSI_SGR_FONT_05 = 15 + _ANSI_SGR_FONT_06 = 16 + _ANSI_SGR_FONT_07 = 17 + _ANSI_SGR_FONT_08 = 18 + _ANSI_SGR_FONT_09 = 19 + _ANSI_SGR_FONT_10 = 20 + _ANSI_SGR_DOUBLEUNDERLINE = 21 + ANSI_SGR_BOLD_DIM_OFF = 22 + _ANSI_SGR_ITALIC_OFF = 23 + ANSI_SGR_UNDERLINE_OFF = 24 + _ANSI_SGR_BLINK_OFF = 25 + _ANSI_SGR_RESERVED_00 = 26 + ANSI_SGR_REVERSE_OFF = 27 + _ANSI_SGR_INVISIBLE_OFF = 28 + _ANSI_SGR_LINETHROUGH_OFF = 29 + ANSI_SGR_FOREGROUND_BLACK = 30 + ANSI_SGR_FOREGROUND_RED = 31 + ANSI_SGR_FOREGROUND_GREEN = 32 + ANSI_SGR_FOREGROUND_YELLOW = 33 + ANSI_SGR_FOREGROUND_BLUE = 34 + ANSI_SGR_FOREGROUND_MAGENTA = 35 + ANSI_SGR_FOREGROUND_CYAN = 36 + ANSI_SGR_FOREGROUND_WHITE = 37 + _ANSI_SGR_RESERVED_01 = 38 + ANSI_SGR_FOREGROUND_DEFAULT = 39 + ANSI_SGR_BACKGROUND_BLACK = 40 + ANSI_SGR_BACKGROUND_RED = 41 + ANSI_SGR_BACKGROUND_GREEN = 42 + ANSI_SGR_BACKGROUND_YELLOW = 43 + ANSI_SGR_BACKGROUND_BLUE = 44 + ANSI_SGR_BACKGROUND_MAGENTA = 45 + ANSI_SGR_BACKGROUND_CYAN = 46 + ANSI_SGR_BACKGROUND_WHITE = 47 + _ANSI_SGR_RESERVED_02 = 48 + ANSI_SGR_BACKGROUND_DEFAULT = 49 + // 50 - 65: Unsupported + + ANSI_MAX_CMD_LENGTH = 4096 + + MAX_INPUT_EVENTS = 128 + DEFAULT_WIDTH = 80 + DEFAULT_HEIGHT = 24 + + ANSI_BEL = 0x07 + ANSI_BACKSPACE = 0x08 + ANSI_TAB = 0x09 + ANSI_LINE_FEED = 0x0A + ANSI_VERTICAL_TAB = 0x0B + ANSI_FORM_FEED = 0x0C + ANSI_CARRIAGE_RETURN = 0x0D + ANSI_ESCAPE_PRIMARY = 0x1B + ANSI_ESCAPE_SECONDARY = 0x5B + ANSI_OSC_STRING_ENTRY = 0x5D + ANSI_COMMAND_FIRST = 0x40 + ANSI_COMMAND_LAST = 0x7E + DCS_ENTRY = 0x90 + CSI_ENTRY = 0x9B + OSC_STRING = 0x9D + ANSI_PARAMETER_SEP = ";" + ANSI_CMD_G0 = '(' + ANSI_CMD_G1 = ')' + ANSI_CMD_G2 = '*' + ANSI_CMD_G3 = '+' + ANSI_CMD_DECPNM = '>' + ANSI_CMD_DECPAM = '=' + ANSI_CMD_OSC = ']' + ANSI_CMD_STR_TERM = '\\' + + KEY_CONTROL_PARAM_2 = ";2" + KEY_CONTROL_PARAM_3 = ";3" + 
KEY_CONTROL_PARAM_4 = ";4" + KEY_CONTROL_PARAM_5 = ";5" + KEY_CONTROL_PARAM_6 = ";6" + KEY_CONTROL_PARAM_7 = ";7" + KEY_CONTROL_PARAM_8 = ";8" + KEY_ESC_CSI = "\x1B[" + KEY_ESC_N = "\x1BN" + KEY_ESC_O = "\x1BO" + + FILL_CHARACTER = ' ' +) + +func getByteRange(start byte, end byte) []byte { + bytes := make([]byte, 0, 32) + for i := start; i <= end; i++ { + bytes = append(bytes, byte(i)) + } + + return bytes +} + +var toGroundBytes = getToGroundBytes() +var executors = getExecuteBytes() + +// SPACE 20+A0 hex Always and everywhere a blank space +// Intermediate 20-2F hex !"#$%&'()*+,-./ +var intermeds = getByteRange(0x20, 0x2F) + +// Parameters 30-3F hex 0123456789:;<=>? +// CSI Parameters 30-39, 3B hex 0123456789; +var csiParams = getByteRange(0x30, 0x3F) + +var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) + +// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ +var upperCase = getByteRange(0x40, 0x5F) + +// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ +var lowerCase = getByteRange(0x60, 0x7E) + +// Alphabetics 40-7E hex (all of upper and lower case) +var alphabetics = append(upperCase, lowerCase...) + +var printables = getByteRange(0x20, 0x7F) + +var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) +var escapeToGroundBytes = getEscapeToGroundBytes() + +// See http://www.vt100.net/emu/vt500_parser.png for description of the complex +// byte ranges below + +func getEscapeToGroundBytes() []byte { + escapeToGroundBytes := getByteRange(0x30, 0x4F) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) + escapeToGroundBytes = append(escapeToGroundBytes, 0x59) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) + escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) + escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) + return escapeToGroundBytes +} + +func getExecuteBytes() []byte { + executeBytes := getByteRange(0x00, 0x17) + executeBytes = append(executeBytes, 0x19) + executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) + return executeBytes +} + +func getToGroundBytes() []byte { + groundBytes := []byte{0x18} + groundBytes = append(groundBytes, 0x1A) + groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) + groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
+ groundBytes = append(groundBytes, 0x99) + groundBytes = append(groundBytes, 0x9A) + groundBytes = append(groundBytes, 0x9C) + return groundBytes +} + +// Delete 7F hex Always and everywhere ignored +// C1 Control 80-9F hex 32 additional control characters +// G1 Displayable A1-FE hex 94 additional displayable characters +// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/github.com/Azure/go-ansiterm/context.go b/vendor/github.com/Azure/go-ansiterm/context.go new file mode 100644 index 0000000000..8d66e777c0 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/context.go @@ -0,0 +1,7 @@ +package ansiterm + +type ansiContext struct { + currentChar byte + paramBuffer []byte + interBuffer []byte +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go new file mode 100644 index 0000000000..bcbe00d0c5 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_entry_state.go @@ -0,0 +1,49 @@ +package ansiterm + +type csiEntryState struct { + baseState +} + +func (csiState csiEntryState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiEntry::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + return csiState.parser.csiParam, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiEntryState) Transition(s state) error { + csiState.parser.logf("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + case csiState.parser.csiParam: + switch { + case sliceContains(csiParams, csiState.parser.context.currentChar): + csiState.parser.collectParam() + case sliceContains(intermeds, csiState.parser.context.currentChar): + csiState.parser.collectInter() + } + } + + return nil +} + +func (csiState csiEntryState) Enter() error { + csiState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go new file mode 100644 index 0000000000..7ed5e01c34 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/csi_param_state.go @@ -0,0 +1,38 @@ +package ansiterm + +type csiParamState struct { + baseState +} + +func (csiState csiParamState) Handle(b byte) (s state, e error) { + csiState.parser.logf("CsiParam::Handle %#x", b) + + nextState, err := csiState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(alphabetics, b): + return csiState.parser.ground, nil + case sliceContains(csiCollectables, b): + csiState.parser.collectParam() + return csiState, nil + case sliceContains(executors, b): + return csiState, csiState.parser.execute() + } + + return csiState, nil +} + +func (csiState csiParamState) Transition(s state) error { + csiState.parser.logf("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) + csiState.baseState.Transition(s) + + switch s { + case csiState.parser.ground: + return csiState.parser.csiDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go 
b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go new file mode 100644 index 0000000000..1c719db9e4 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_intermediate_state.go @@ -0,0 +1,36 @@ +package ansiterm + +type escapeIntermediateState struct { + baseState +} + +func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeIntermediateState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(intermeds, b): + return escState, escState.parser.collectInter() + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeIntermediateToGroundBytes, b): + return escState.parser.ground, nil + } + + return escState, nil +} + +func (escState escapeIntermediateState) Transition(s state) error { + escState.parser.logf("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/escape_state.go b/vendor/github.com/Azure/go-ansiterm/escape_state.go new file mode 100644 index 0000000000..6390abd231 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/escape_state.go @@ -0,0 +1,47 @@ +package ansiterm + +type escapeState struct { + baseState +} + +func (escState escapeState) Handle(b byte) (s state, e error) { + escState.parser.logf("escapeState::Handle %#x", b) + nextState, err := escState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case b == ANSI_ESCAPE_SECONDARY: + return escState.parser.csiEntry, nil + case b == ANSI_OSC_STRING_ENTRY: + return escState.parser.oscString, nil + case sliceContains(executors, b): + return escState, escState.parser.execute() + case sliceContains(escapeToGroundBytes, b): + return escState.parser.ground, nil + case sliceContains(intermeds, b): + return escState.parser.escapeIntermediate, nil + } + + return escState, nil +} + +func (escState escapeState) Transition(s state) error { + escState.parser.logf("Escape::Transition %s --> %s", escState.Name(), s.Name()) + escState.baseState.Transition(s) + + switch s { + case escState.parser.ground: + return escState.parser.escDispatch() + case escState.parser.escapeIntermediate: + return escState.parser.collectInter() + } + + return nil +} + +func (escState escapeState) Enter() error { + escState.parser.clear() + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/event_handler.go b/vendor/github.com/Azure/go-ansiterm/event_handler.go new file mode 100644 index 0000000000..98087b38c2 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/event_handler.go @@ -0,0 +1,90 @@ +package ansiterm + +type AnsiEventHandler interface { + // Print + Print(b byte) error + + // Execute C0 commands + Execute(b byte) error + + // CUrsor Up + CUU(int) error + + // CUrsor Down + CUD(int) error + + // CUrsor Forward + CUF(int) error + + // CUrsor Backward + CUB(int) error + + // Cursor to Next Line + CNL(int) error + + // Cursor to Previous Line + CPL(int) error + + // Cursor Horizontal position Absolute + CHA(int) error + + // Vertical line Position Absolute + VPA(int) error + + // CUrsor Position + CUP(int, int) error + + // Horizontal and Vertical Position (depends on PUM) + 
HVP(int, int) error + + // Text Cursor Enable Mode + DECTCEM(bool) error + + // Origin Mode + DECOM(bool) error + + // 132 Column Mode + DECCOLM(bool) error + + // Erase in Display + ED(int) error + + // Erase in Line + EL(int) error + + // Insert Line + IL(int) error + + // Delete Line + DL(int) error + + // Insert Character + ICH(int) error + + // Delete Character + DCH(int) error + + // Set Graphics Rendition + SGR([]int) error + + // Pan Down + SU(int) error + + // Pan Up + SD(int) error + + // Device Attributes + DA([]string) error + + // Set Top and Bottom Margins + DECSTBM(int, int) error + + // Index + IND() error + + // Reverse Index + RI() error + + // Flush updates from previous commands + Flush() error +} diff --git a/vendor/github.com/Azure/go-ansiterm/ground_state.go b/vendor/github.com/Azure/go-ansiterm/ground_state.go new file mode 100644 index 0000000000..52451e9469 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/ground_state.go @@ -0,0 +1,24 @@ +package ansiterm + +type groundState struct { + baseState +} + +func (gs groundState) Handle(b byte) (s state, e error) { + gs.parser.context.currentChar = b + + nextState, err := gs.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case sliceContains(printables, b): + return gs, gs.parser.print() + + case sliceContains(executors, b): + return gs, gs.parser.execute() + } + + return gs, nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go new file mode 100644 index 0000000000..593b10ab69 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/osc_string_state.go @@ -0,0 +1,31 @@ +package ansiterm + +type oscStringState struct { + baseState +} + +func (oscState oscStringState) Handle(b byte) (s state, e error) { + oscState.parser.logf("OscString::Handle %#x", b) + nextState, err := oscState.baseState.Handle(b) + if nextState != nil || err != nil { + return nextState, err + } + + switch { + case isOscStringTerminator(b): + return oscState.parser.ground, nil + } + + return oscState, nil +} + +// See below for OSC string terminators for linux +// http://man7.org/linux/man-pages/man4/console_codes.4.html +func isOscStringTerminator(b byte) bool { + + if b == ANSI_BEL || b == 0x5C { + return true + } + + return false +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser.go b/vendor/github.com/Azure/go-ansiterm/parser.go new file mode 100644 index 0000000000..03cec7ada6 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser.go @@ -0,0 +1,151 @@ +package ansiterm + +import ( + "errors" + "log" + "os" +) + +type AnsiParser struct { + currState state + eventHandler AnsiEventHandler + context *ansiContext + csiEntry state + csiParam state + dcsEntry state + escape state + escapeIntermediate state + error state + ground state + oscString state + stateMap []state + + logf func(string, ...interface{}) +} + +type Option func(*AnsiParser) + +func WithLogf(f func(string, ...interface{})) Option { + return func(ap *AnsiParser) { + ap.logf = f + } +} + +func CreateParser(initialState string, evtHandler AnsiEventHandler, opts ...Option) *AnsiParser { + ap := &AnsiParser{ + eventHandler: evtHandler, + context: &ansiContext{}, + } + for _, o := range opts { + o(ap) + } + + if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("ansiParser.log") + logger := log.New(logFile, "", log.LstdFlags) + if ap.logf 
!= nil { + l := ap.logf + ap.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + ap.logf = logger.Printf + } + } + + if ap.logf == nil { + ap.logf = func(string, ...interface{}) {} + } + + ap.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: ap}} + ap.csiParam = csiParamState{baseState{name: "CsiParam", parser: ap}} + ap.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: ap}} + ap.escape = escapeState{baseState{name: "Escape", parser: ap}} + ap.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: ap}} + ap.error = errorState{baseState{name: "Error", parser: ap}} + ap.ground = groundState{baseState{name: "Ground", parser: ap}} + ap.oscString = oscStringState{baseState{name: "OscString", parser: ap}} + + ap.stateMap = []state{ + ap.csiEntry, + ap.csiParam, + ap.dcsEntry, + ap.escape, + ap.escapeIntermediate, + ap.error, + ap.ground, + ap.oscString, + } + + ap.currState = getState(initialState, ap.stateMap) + + ap.logf("CreateParser: parser %p", ap) + return ap +} + +func getState(name string, states []state) state { + for _, el := range states { + if el.Name() == name { + return el + } + } + + return nil +} + +func (ap *AnsiParser) Parse(bytes []byte) (int, error) { + for i, b := range bytes { + if err := ap.handle(b); err != nil { + return i, err + } + } + + return len(bytes), ap.eventHandler.Flush() +} + +func (ap *AnsiParser) handle(b byte) error { + ap.context.currentChar = b + newState, err := ap.currState.Handle(b) + if err != nil { + return err + } + + if newState == nil { + ap.logf("WARNING: newState is nil") + return errors.New("New state of 'nil' is invalid.") + } + + if newState != ap.currState { + if err := ap.changeState(newState); err != nil { + return err + } + } + + return nil +} + +func (ap *AnsiParser) changeState(newState state) error { + ap.logf("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) + + // Exit old state + if err := ap.currState.Exit(); err != nil { + ap.logf("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) + return err + } + + // Perform transition action + if err := ap.currState.Transition(newState); err != nil { + ap.logf("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) + return err + } + + // Enter new state + if err := newState.Enter(); err != nil { + ap.logf("Enter state '%s' failed with: '%v'", newState.Name(), err) + return err + } + + ap.currState = newState + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go new file mode 100644 index 0000000000..de0a1f9cde --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_action_helpers.go @@ -0,0 +1,99 @@ +package ansiterm + +import ( + "strconv" +) + +func parseParams(bytes []byte) ([]string, error) { + paramBuff := make([]byte, 0, 0) + params := []string{} + + for _, v := range bytes { + if v == ';' { + if len(paramBuff) > 0 { + // Completed parameter, append it to the list + s := string(paramBuff) + params = append(params, s) + paramBuff = make([]byte, 0, 0) + } + } else { + paramBuff = append(paramBuff, v) + } + } + + // Last parameter may not be terminated with ';' + if len(paramBuff) > 0 { + s := string(paramBuff) + params = append(params, s) + } + + return params, nil +} + +func parseCmd(context ansiContext) (string, error) { + return string(context.currentChar), nil +} + +func getInt(params []string, 
dflt int) int { + i := getInts(params, 1, dflt)[0] + return i +} + +func getInts(params []string, minCount int, dflt int) []int { + ints := []int{} + + for _, v := range params { + i, _ := strconv.Atoi(v) + // Zero is mapped to the default value in VT100. + if i == 0 { + i = dflt + } + ints = append(ints, i) + } + + if len(ints) < minCount { + remaining := minCount - len(ints) + for i := 0; i < remaining; i++ { + ints = append(ints, dflt) + } + } + + return ints +} + +func (ap *AnsiParser) modeDispatch(param string, set bool) error { + switch param { + case "?3": + return ap.eventHandler.DECCOLM(set) + case "?6": + return ap.eventHandler.DECOM(set) + case "?25": + return ap.eventHandler.DECTCEM(set) + } + return nil +} + +func (ap *AnsiParser) hDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], true) + } + + return nil +} + +func (ap *AnsiParser) lDispatch(params []string) error { + if len(params) == 1 { + return ap.modeDispatch(params[0], false) + } + + return nil +} + +func getEraseParam(params []string) int { + param := getInt(params, 0) + if param < 0 || 3 < param { + param = 0 + } + + return param +} diff --git a/vendor/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/github.com/Azure/go-ansiterm/parser_actions.go new file mode 100644 index 0000000000..0bb5e51e9a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/parser_actions.go @@ -0,0 +1,119 @@ +package ansiterm + +func (ap *AnsiParser) collectParam() error { + currChar := ap.context.currentChar + ap.logf("collectParam %#x", currChar) + ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) + return nil +} + +func (ap *AnsiParser) collectInter() error { + currChar := ap.context.currentChar + ap.logf("collectInter %#x", currChar) + ap.context.paramBuffer = append(ap.context.interBuffer, currChar) + return nil +} + +func (ap *AnsiParser) escDispatch() error { + cmd, _ := parseCmd(*ap.context) + intermeds := ap.context.interBuffer + ap.logf("escDispatch currentChar: %#x", ap.context.currentChar) + ap.logf("escDispatch: %v(%v)", cmd, intermeds) + + switch cmd { + case "D": // IND + return ap.eventHandler.IND() + case "E": // NEL, equivalent to CRLF + err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) + if err == nil { + err = ap.eventHandler.Execute(ANSI_LINE_FEED) + } + return err + case "M": // RI + return ap.eventHandler.RI() + } + + return nil +} + +func (ap *AnsiParser) csiDispatch() error { + cmd, _ := parseCmd(*ap.context) + params, _ := parseParams(ap.context.paramBuffer) + ap.logf("Parsed params: %v with length: %d", params, len(params)) + + ap.logf("csiDispatch: %v(%v)", cmd, params) + + switch cmd { + case "@": + return ap.eventHandler.ICH(getInt(params, 1)) + case "A": + return ap.eventHandler.CUU(getInt(params, 1)) + case "B": + return ap.eventHandler.CUD(getInt(params, 1)) + case "C": + return ap.eventHandler.CUF(getInt(params, 1)) + case "D": + return ap.eventHandler.CUB(getInt(params, 1)) + case "E": + return ap.eventHandler.CNL(getInt(params, 1)) + case "F": + return ap.eventHandler.CPL(getInt(params, 1)) + case "G": + return ap.eventHandler.CHA(getInt(params, 1)) + case "H": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.CUP(x, y) + case "J": + param := getEraseParam(params) + return ap.eventHandler.ED(param) + case "K": + param := getEraseParam(params) + return ap.eventHandler.EL(param) + case "L": + return ap.eventHandler.IL(getInt(params, 1)) + case "M": + return 
ap.eventHandler.DL(getInt(params, 1)) + case "P": + return ap.eventHandler.DCH(getInt(params, 1)) + case "S": + return ap.eventHandler.SU(getInt(params, 1)) + case "T": + return ap.eventHandler.SD(getInt(params, 1)) + case "c": + return ap.eventHandler.DA(params) + case "d": + return ap.eventHandler.VPA(getInt(params, 1)) + case "f": + ints := getInts(params, 2, 1) + x, y := ints[0], ints[1] + return ap.eventHandler.HVP(x, y) + case "h": + return ap.hDispatch(params) + case "l": + return ap.lDispatch(params) + case "m": + return ap.eventHandler.SGR(getInts(params, 1, 0)) + case "r": + ints := getInts(params, 2, 1) + top, bottom := ints[0], ints[1] + return ap.eventHandler.DECSTBM(top, bottom) + default: + ap.logf("ERROR: Unsupported CSI command: '%s', with full context: %v", cmd, ap.context) + return nil + } + +} + +func (ap *AnsiParser) print() error { + return ap.eventHandler.Print(ap.context.currentChar) +} + +func (ap *AnsiParser) clear() error { + ap.context = &ansiContext{} + return nil +} + +func (ap *AnsiParser) execute() error { + return ap.eventHandler.Execute(ap.context.currentChar) +} diff --git a/vendor/github.com/Azure/go-ansiterm/states.go b/vendor/github.com/Azure/go-ansiterm/states.go new file mode 100644 index 0000000000..f2ea1fcd12 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/states.go @@ -0,0 +1,71 @@ +package ansiterm + +type stateID int + +type state interface { + Enter() error + Exit() error + Handle(byte) (state, error) + Name() string + Transition(state) error +} + +type baseState struct { + name string + parser *AnsiParser +} + +func (base baseState) Enter() error { + return nil +} + +func (base baseState) Exit() error { + return nil +} + +func (base baseState) Handle(b byte) (s state, e error) { + + switch { + case b == CSI_ENTRY: + return base.parser.csiEntry, nil + case b == DCS_ENTRY: + return base.parser.dcsEntry, nil + case b == ANSI_ESCAPE_PRIMARY: + return base.parser.escape, nil + case b == OSC_STRING: + return base.parser.oscString, nil + case sliceContains(toGroundBytes, b): + return base.parser.ground, nil + } + + return nil, nil +} + +func (base baseState) Name() string { + return base.name +} + +func (base baseState) Transition(s state) error { + if s == base.parser.ground { + execBytes := []byte{0x18} + execBytes = append(execBytes, 0x1A) + execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) + execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
+ execBytes = append(execBytes, 0x99) + execBytes = append(execBytes, 0x9A) + + if sliceContains(execBytes, base.parser.context.currentChar) { + return base.parser.execute() + } + } + + return nil +} + +type dcsEntryState struct { + baseState +} + +type errorState struct { + baseState +} diff --git a/vendor/github.com/Azure/go-ansiterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/utilities.go new file mode 100644 index 0000000000..392114493a --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/utilities.go @@ -0,0 +1,21 @@ +package ansiterm + +import ( + "strconv" +) + +func sliceContains(bytes []byte, b byte) bool { + for _, v := range bytes { + if v == b { + return true + } + } + + return false +} + +func convertBytesToInteger(bytes []byte) int { + s := string(bytes) + i, _ := strconv.Atoi(s) + return i +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go new file mode 100644 index 0000000000..5599082ae9 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/ansi.go @@ -0,0 +1,196 @@ +// +build windows + +package winterm + +import ( + "fmt" + "os" + "strconv" + "strings" + "syscall" + + "github.com/Azure/go-ansiterm" + windows "golang.org/x/sys/windows" +) + +// Windows keyboard constants +// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. +const ( + VK_PRIOR = 0x21 // PAGE UP key + VK_NEXT = 0x22 // PAGE DOWN key + VK_END = 0x23 // END key + VK_HOME = 0x24 // HOME key + VK_LEFT = 0x25 // LEFT ARROW key + VK_UP = 0x26 // UP ARROW key + VK_RIGHT = 0x27 // RIGHT ARROW key + VK_DOWN = 0x28 // DOWN ARROW key + VK_SELECT = 0x29 // SELECT key + VK_PRINT = 0x2A // PRINT key + VK_EXECUTE = 0x2B // EXECUTE key + VK_SNAPSHOT = 0x2C // PRINT SCREEN key + VK_INSERT = 0x2D // INS key + VK_DELETE = 0x2E // DEL key + VK_HELP = 0x2F // HELP key + VK_F1 = 0x70 // F1 key + VK_F2 = 0x71 // F2 key + VK_F3 = 0x72 // F3 key + VK_F4 = 0x73 // F4 key + VK_F5 = 0x74 // F5 key + VK_F6 = 0x75 // F6 key + VK_F7 = 0x76 // F7 key + VK_F8 = 0x77 // F8 key + VK_F9 = 0x78 // F9 key + VK_F10 = 0x79 // F10 key + VK_F11 = 0x7A // F11 key + VK_F12 = 0x7B // F12 key + + RIGHT_ALT_PRESSED = 0x0001 + LEFT_ALT_PRESSED = 0x0002 + RIGHT_CTRL_PRESSED = 0x0004 + LEFT_CTRL_PRESSED = 0x0008 + SHIFT_PRESSED = 0x0010 + NUMLOCK_ON = 0x0020 + SCROLLLOCK_ON = 0x0040 + CAPSLOCK_ON = 0x0080 + ENHANCED_KEY = 0x0100 +) + +type ansiCommand struct { + CommandBytes []byte + Command string + Parameters []string + IsSpecial bool +} + +func newAnsiCommand(command []byte) *ansiCommand { + + if isCharacterSelectionCmdChar(command[1]) { + // Is Character Set Selection commands + return &ansiCommand{ + CommandBytes: command, + Command: string(command), + IsSpecial: true, + } + } + + // last char is command character + lastCharIndex := len(command) - 1 + + ac := &ansiCommand{ + CommandBytes: command, + Command: string(command[lastCharIndex]), + IsSpecial: false, + } + + // more than a single escape + if lastCharIndex != 0 { + start := 1 + // skip if double char escape sequence + if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { + start++ + } + // convert this to GetNextParam method + ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) + } + + return ac +} + +func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { + if index < 0 || index >= len(ac.Parameters) { + return 
defaultValue + } + + param, err := strconv.ParseInt(ac.Parameters[index], 10, 16) + if err != nil { + return defaultValue + } + + return int16(param) +} + +func (ac *ansiCommand) String() string { + return fmt.Sprintf("0x%v \"%v\" (\"%v\")", + bytesToHex(ac.CommandBytes), + ac.Command, + strings.Join(ac.Parameters, "\",\"")) +} + +// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. +// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. +func isAnsiCommandChar(b byte) bool { + switch { + case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: + return true + case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: + // non-CSI escape sequence terminator + return true + case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: + // String escape sequence terminator + return true + } + return false +} + +func isXtermOscSequence(command []byte, current byte) bool { + return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) +} + +func isCharacterSelectionCmdChar(b byte) bool { + return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) +} + +// bytesToHex converts a slice of bytes to a human-readable string. +func bytesToHex(b []byte) string { + hex := make([]string, len(b)) + for i, ch := range b { + hex[i] = fmt.Sprintf("%X", ch) + } + return strings.Join(hex, "") +} + +// ensureInRange adjusts the passed value, if necessary, to ensure it is within +// the passed min / max range. +func ensureInRange(n int16, min int16, max int16) int16 { + if n < min { + return min + } else if n > max { + return max + } else { + return n + } +} + +func GetStdFile(nFile int) (*os.File, uintptr) { + var file *os.File + + // syscall uses negative numbers + // windows package uses very big uint32 + // Keep these switches split so we don't have to convert ints too much. + switch uint32(nFile) { + case windows.STD_INPUT_HANDLE: + file = os.Stdin + case windows.STD_OUTPUT_HANDLE: + file = os.Stdout + case windows.STD_ERROR_HANDLE: + file = os.Stderr + default: + switch nFile { + case syscall.STD_INPUT_HANDLE: + file = os.Stdin + case syscall.STD_OUTPUT_HANDLE: + file = os.Stdout + case syscall.STD_ERROR_HANDLE: + file = os.Stderr + default: + panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) + } + } + + fd, err := syscall.GetStdHandle(nFile) + if err != nil { + panic(fmt.Errorf("Invalid standard handle identifier: %v -- %v", nFile, err)) + } + + return file, uintptr(fd) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/github.com/Azure/go-ansiterm/winterm/api.go new file mode 100644 index 0000000000..6055e33b91 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/api.go @@ -0,0 +1,327 @@ +// +build windows + +package winterm + +import ( + "fmt" + "syscall" + "unsafe" +) + +//=========================================================================================================== +// IMPORTANT NOTE: +// +// The methods below make extensive use of the "unsafe" package to obtain the required pointers. +// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack +// variables) the pointers reference *before* the API completes. 
+// +// As a result, in those cases, the code must hint that the variables remain in active by invoking the +// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer +// require unsafe pointers. +// +// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform +// the garbage collector the variables remain in use if: +// +// -- The value is not a pointer (e.g., int32, struct) +// -- The value is not referenced by the method after passing the pointer to Windows +// +// See http://golang.org/doc/go1.3. +//=========================================================================================================== + +var ( + kernel32DLL = syscall.NewLazyDLL("kernel32.dll") + + getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") + setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") + setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") + setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") + getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") + setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") + scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") + setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") + setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") + writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") + readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") + waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") +) + +// Windows Console constants +const ( + // Console modes + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. + ENABLE_PROCESSED_INPUT = 0x0001 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_WINDOW_INPUT = 0x0008 + ENABLE_MOUSE_INPUT = 0x0010 + ENABLE_INSERT_MODE = 0x0020 + ENABLE_QUICK_EDIT_MODE = 0x0040 + ENABLE_EXTENDED_FLAGS = 0x0080 + ENABLE_AUTO_POSITION = 0x0100 + ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 + + ENABLE_PROCESSED_OUTPUT = 0x0001 + ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 + ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + DISABLE_NEWLINE_AUTO_RETURN = 0x0008 + ENABLE_LVB_GRID_WORLDWIDE = 0x0010 + + // Character attributes + // Note: + // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). + // Clearing all foreground or background colors results in black; setting all creates white. + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. + FOREGROUND_BLUE uint16 = 0x0001 + FOREGROUND_GREEN uint16 = 0x0002 + FOREGROUND_RED uint16 = 0x0004 + FOREGROUND_INTENSITY uint16 = 0x0008 + FOREGROUND_MASK uint16 = 0x000F + + BACKGROUND_BLUE uint16 = 0x0010 + BACKGROUND_GREEN uint16 = 0x0020 + BACKGROUND_RED uint16 = 0x0040 + BACKGROUND_INTENSITY uint16 = 0x0080 + BACKGROUND_MASK uint16 = 0x00F0 + + COMMON_LVB_MASK uint16 = 0xFF00 + COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 + COMMON_LVB_UNDERSCORE uint16 = 0x8000 + + // Input event types + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
+ KEY_EVENT = 0x0001 + MOUSE_EVENT = 0x0002 + WINDOW_BUFFER_SIZE_EVENT = 0x0004 + MENU_EVENT = 0x0008 + FOCUS_EVENT = 0x0010 + + // WaitForSingleObject return codes + WAIT_ABANDONED = 0x00000080 + WAIT_FAILED = 0xFFFFFFFF + WAIT_SIGNALED = 0x0000000 + WAIT_TIMEOUT = 0x00000102 + + // WaitForSingleObject wait duration + WAIT_INFINITE = 0xFFFFFFFF + WAIT_ONE_SECOND = 1000 + WAIT_HALF_SECOND = 500 + WAIT_QUARTER_SECOND = 250 +) + +// Windows API Console types +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) +// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment +type ( + CHAR_INFO struct { + UnicodeChar uint16 + Attributes uint16 + } + + CONSOLE_CURSOR_INFO struct { + Size uint32 + Visible int32 + } + + CONSOLE_SCREEN_BUFFER_INFO struct { + Size COORD + CursorPosition COORD + Attributes uint16 + Window SMALL_RECT + MaximumWindowSize COORD + } + + COORD struct { + X int16 + Y int16 + } + + SMALL_RECT struct { + Left int16 + Top int16 + Right int16 + Bottom int16 + } + + // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest + // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. + INPUT_RECORD struct { + EventType uint16 + KeyEvent KEY_EVENT_RECORD + } + + KEY_EVENT_RECORD struct { + KeyDown int32 + RepeatCount uint16 + VirtualKeyCode uint16 + VirtualScanCode uint16 + UnicodeChar uint16 + ControlKeyState uint32 + } + + WINDOW_BUFFER_SIZE struct { + Size COORD + } +) + +// boolToBOOL converts a Go bool into a Windows int32. +func boolToBOOL(f bool) int32 { + if f { + return int32(1) + } else { + return int32(0) + } +} + +// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. +func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorInfo sets the size and visiblity of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. +func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { + r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) + return checkError(r1, r2, err) +} + +// SetConsoleCursorPosition location of the console cursor. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. +func SetConsoleCursorPosition(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// GetConsoleMode gets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. +func GetConsoleMode(handle uintptr) (mode uint32, err error) { + err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) + return mode, err +} + +// SetConsoleMode sets the console mode for given file descriptor +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
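+// Unlike GetConsoleMode above, which goes through the syscall package helper, this calls the kernel32 SetConsoleMode proc directly.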
+func SetConsoleMode(handle uintptr, mode uint32) error { + r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) + use(mode) + return checkError(r1, r2, err) +} + +// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. +func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { + info := CONSOLE_SCREEN_BUFFER_INFO{} + err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) + if err != nil { + return nil, err + } + return &info, nil +} + +func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { + r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) + use(scrollRect) + use(clipRect) + use(destOrigin) + use(char) + return checkError(r1, r2, err) +} + +// SetConsoleScreenBufferSize sets the size of the console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. +func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { + r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) + use(coord) + return checkError(r1, r2, err) +} + +// SetConsoleTextAttribute sets the attributes of characters written to the +// console screen buffer by the WriteFile or WriteConsole function. +// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. +func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { + r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) + use(attribute) + return checkError(r1, r2, err) +} + +// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. +// Note that the size and location must be within and no larger than the backing console screen buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. +func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { + r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) + use(isAbsolute) + use(rect) + return checkError(r1, r2, err) +} + +// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. +func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { + r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) + use(buffer) + use(bufferSize) + use(bufferCoord) + return checkError(r1, r2, err) +} + +// ReadConsoleInput reads (and removes) data from the console input buffer. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
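+// The buffer must be non-empty (the call takes a pointer to its first element); up to len(buffer) records are read and the number actually read is stored through count.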
+func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { + r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) + use(buffer) + return checkError(r1, r2, err) +} + +// WaitForSingleObject waits for the passed handle to be signaled. +// It returns true if the handle was signaled; false otherwise. +// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. +func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { + r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) + switch r1 { + case WAIT_ABANDONED, WAIT_TIMEOUT: + return false, nil + case WAIT_SIGNALED: + return true, nil + } + use(msWait) + return false, err +} + +// String helpers +func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { + return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) +} + +func (coord COORD) String() string { + return fmt.Sprintf("%v,%v", coord.X, coord.Y) +} + +func (rect SMALL_RECT) String() string { + return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) +} + +// checkError evaluates the results of a Windows API call and returns the error if it failed. +func checkError(r1, r2 uintptr, err error) error { + // Windows APIs return non-zero to indicate success + if r1 != 0 { + return nil + } + + // Return the error if provided, otherwise default to EINVAL + if err != nil { + return err + } + return syscall.EINVAL +} + +// coordToPointer converts a COORD into a uintptr (by fooling the type system). +func coordToPointer(c COORD) uintptr { + // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. + return uintptr(*((*uint32)(unsafe.Pointer(&c)))) +} + +// use is a no-op, but the compiler cannot see that it is. +// Calling use(p) ensures that p is kept live until that point. +func use(p interface{}) {} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go new file mode 100644 index 0000000000..cbec8f728f --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/attr_translation.go @@ -0,0 +1,100 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +const ( + FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE +) + +// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the +// request represented by the passed ANSI mode. 
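+// It returns the updated attribute flags together with the (possibly toggled) reverse-video state.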
+func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { + switch ansiMode { + + // Mode styles + case ansiterm.ANSI_SGR_BOLD: + windowsMode = windowsMode | FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: + windowsMode &^= FOREGROUND_INTENSITY + + case ansiterm.ANSI_SGR_UNDERLINE: + windowsMode = windowsMode | COMMON_LVB_UNDERSCORE + + case ansiterm.ANSI_SGR_REVERSE: + inverted = true + + case ansiterm.ANSI_SGR_REVERSE_OFF: + inverted = false + + case ansiterm.ANSI_SGR_UNDERLINE_OFF: + windowsMode &^= COMMON_LVB_UNDERSCORE + + // Foreground colors + case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: + windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_BLACK: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_FOREGROUND_RED: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED + + case ansiterm.ANSI_SGR_FOREGROUND_GREEN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN + + case ansiterm.ANSI_SGR_FOREGROUND_BLUE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_CYAN: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE + + case ansiterm.ANSI_SGR_FOREGROUND_WHITE: + windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE + + // Background colors + case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: + // Black with no intensity + windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_BLACK: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) + + case ansiterm.ANSI_SGR_BACKGROUND_RED: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED + + case ansiterm.ANSI_SGR_BACKGROUND_GREEN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN + + case ansiterm.ANSI_SGR_BACKGROUND_BLUE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_CYAN: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE + + case ansiterm.ANSI_SGR_BACKGROUND_WHITE: + windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE + } + + return windowsMode, inverted +} + +// invertAttributes inverts the foreground and background colors of a Windows attributes value +func invertAttributes(windowsMode uint16) uint16 { + return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go new file mode 100644 index 0000000000..3ee06ea728 --- /dev/null +++ 
b/vendor/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go @@ -0,0 +1,101 @@ +// +build windows + +package winterm + +const ( + horizontal = iota + vertical +) + +func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { + if h.originMode { + sr := h.effectiveSr(info.Window) + return SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + } else { + return SMALL_RECT{ + Top: info.Window.Top, + Bottom: info.Window.Bottom, + Left: 0, + Right: info.Size.X - 1, + } + } +} + +// setCursorPosition sets the cursor to the specified position, bounded to the screen size +func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { + position.X = ensureInRange(position.X, window.Left, window.Right) + position.Y = ensureInRange(position.Y, window.Top, window.Bottom) + err := SetConsoleCursorPosition(h.fd, position) + if err != nil { + return err + } + h.logf("Cursor position set: (%d, %d)", position.X, position.Y) + return err +} + +func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { + return h.moveCursor(vertical, param) +} + +func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { + return h.moveCursor(horizontal, param) +} + +func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + switch moveMode { + case horizontal: + position.X += int16(param) + case vertical: + position.Y += int16(param) + } + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = 0 + position.Y += int16(param) + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + position := info.CursorPosition + position.X = int16(param) - 1 + + if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go new file mode 100644 index 0000000000..244b5fa25e --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/erase_helpers.go @@ -0,0 +1,84 @@ +// +build windows + +package winterm + +import "github.com/Azure/go-ansiterm" + +func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { + // Ignore an invalid (negative area) request + if toCoord.Y < fromCoord.Y { + return nil + } + + var err error + + var coordStart = COORD{} + var coordEnd = COORD{} + + xCurrent, yCurrent := fromCoord.X, fromCoord.Y + xEnd, yEnd := toCoord.X, toCoord.Y + + // Clear any partial initial line + if xCurrent > 0 { + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yCurrent + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent += 1 + } + + // Clear intervening rectangular section + if yCurrent < yEnd { + coordStart.X, coordStart.Y = 
xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd-1 + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + xCurrent = 0 + yCurrent = yEnd + } + + // Clear remaining partial ending line + coordStart.X, coordStart.Y = xCurrent, yCurrent + coordEnd.X, coordEnd.Y = xEnd, yEnd + + err = h.clearRect(attributes, coordStart, coordEnd) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { + region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} + width := toCoord.X - fromCoord.X + 1 + height := toCoord.Y - fromCoord.Y + 1 + size := uint32(width) * uint32(height) + + if size <= 0 { + return nil + } + + buffer := make([]CHAR_INFO, size) + + char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} + for i := 0; i < int(size); i++ { + buffer[i] = char + } + + err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) + if err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go new file mode 100644 index 0000000000..2d27fa1d02 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/scroll_helper.go @@ -0,0 +1,118 @@ +// +build windows + +package winterm + +// effectiveSr gets the current effective scroll region in buffer coordinates +func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { + top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) + bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) + if top >= bottom { + top = window.Top + bottom = window.Bottom + } + return scrollRegion{top: top, bottom: bottom} +} + +func (h *windowsAnsiEventHandler) scrollUp(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + sr := h.effectiveSr(info.Window) + return h.scroll(param, sr, info) +} + +func (h *windowsAnsiEventHandler) scrollDown(param int) error { + return h.scrollUp(-param) +} + +func (h *windowsAnsiEventHandler) deleteLines(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + start := info.CursorPosition.Y + sr := h.effectiveSr(info.Window) + // Lines cannot be inserted or deleted outside the scrolling region. + if start >= sr.top && start <= sr.bottom { + sr.top = start + return h.scroll(param, sr, info) + } else { + return nil + } +} + +func (h *windowsAnsiEventHandler) insertLines(param int) error { + return h.deleteLines(-param) +} + +// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
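+// A positive param moves the region contents up (the destination origin sits param lines above the source); a negative param moves them down.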
+func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { + h.logf("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) + h.logf("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) + + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: sr.top, + Bottom: sr.bottom, + Left: 0, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: 0, + Y: sr.top - int16(param), + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} + +func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + return h.scrollLine(param, info.CursorPosition, info) +} + +func (h *windowsAnsiEventHandler) insertCharacters(param int) error { + return h.deleteCharacters(-param) +} + +// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. +func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { + // Copy from and clip to the scroll region (full buffer width) + scrollRect := SMALL_RECT{ + Top: position.Y, + Bottom: position.Y, + Left: position.X, + Right: info.Size.X - 1, + } + + // Origin to which area should be copied + destOrigin := COORD{ + X: position.X - int16(columns), + Y: position.Y, + } + + char := CHAR_INFO{ + UnicodeChar: ' ', + Attributes: h.attributes, + } + + if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go new file mode 100644 index 0000000000..afa7635d77 --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/utilities.go @@ -0,0 +1,9 @@ +// +build windows + +package winterm + +// AddInRange increments a value by the passed quantity while ensuring the values +// always remain within the supplied min / max range. 
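+// It is a thin wrapper over ensureInRange.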
+func addInRange(n int16, increment int16, min int16, max int16) int16 { + return ensureInRange(n+increment, min, max) +} diff --git a/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go new file mode 100644 index 0000000000..2d40fb75ad --- /dev/null +++ b/vendor/github.com/Azure/go-ansiterm/winterm/win_event_handler.go @@ -0,0 +1,743 @@ +// +build windows + +package winterm + +import ( + "bytes" + "log" + "os" + "strconv" + + "github.com/Azure/go-ansiterm" +) + +type windowsAnsiEventHandler struct { + fd uintptr + file *os.File + infoReset *CONSOLE_SCREEN_BUFFER_INFO + sr scrollRegion + buffer bytes.Buffer + attributes uint16 + inverted bool + wrapNext bool + drewMarginByte bool + originMode bool + marginByte byte + curInfo *CONSOLE_SCREEN_BUFFER_INFO + curPos COORD + logf func(string, ...interface{}) +} + +type Option func(*windowsAnsiEventHandler) + +func WithLogf(f func(string, ...interface{})) Option { + return func(w *windowsAnsiEventHandler) { + w.logf = f + } +} + +func CreateWinEventHandler(fd uintptr, file *os.File, opts ...Option) ansiterm.AnsiEventHandler { + infoReset, err := GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + h := &windowsAnsiEventHandler{ + fd: fd, + file: file, + infoReset: infoReset, + attributes: infoReset.Attributes, + } + for _, o := range opts { + o(h) + } + + if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { + logFile, _ := os.Create("winEventHandler.log") + logger := log.New(logFile, "", log.LstdFlags) + if h.logf != nil { + l := h.logf + h.logf = func(s string, v ...interface{}) { + l(s, v...) + logger.Printf(s, v...) + } + } else { + h.logf = logger.Printf + } + } + + if h.logf == nil { + h.logf = func(string, ...interface{}) {} + } + + return h +} + +type scrollRegion struct { + top int16 + bottom int16 +} + +// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the +// current cursor position and scroll region settings, in which case it returns +// true. If no special handling is necessary, then it does nothing and returns +// false. +// +// In the false case, the caller should ensure that a carriage return +// and line feed are inserted or that the text is otherwise wrapped. +func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { + if h.wrapNext { + if err := h.Flush(); err != nil { + return false, err + } + h.clearWrap() + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return false, err + } + sr := h.effectiveSr(info.Window) + if pos.Y == sr.bottom { + // Scrolling is necessary. Let Windows automatically scroll if the scrolling region + // is the full window. + if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { + if includeCR { + pos.X = 0 + h.updatePos(pos) + } + return false, nil + } + + // A custom scroll region is active. Scroll the window manually to simulate + // the LF. + if err := h.Flush(); err != nil { + return false, err + } + h.logf("Simulating LF inside scroll region") + if err := h.scrollUp(1); err != nil { + return false, err + } + if includeCR { + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + + } else if pos.Y < info.Window.Bottom { + // Let Windows handle the LF. + pos.Y++ + if includeCR { + pos.X = 0 + } + h.updatePos(pos) + return false, nil + } else { + // The cursor is at the bottom of the screen but outside the scroll + // region. 
Skip the LF. + h.logf("Simulating LF outside scroll region") + if includeCR { + if err := h.Flush(); err != nil { + return false, err + } + pos.X = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return false, err + } + } + return true, nil + } +} + +// executeLF executes a LF without a CR. +func (h *windowsAnsiEventHandler) executeLF() error { + handled, err := h.simulateLF(false) + if err != nil { + return err + } + if !handled { + // Windows LF will reset the cursor column position. Write the LF + // and restore the cursor position. + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + if pos.X != 0 { + if err := h.Flush(); err != nil { + return err + } + h.logf("Resetting cursor position for LF without CR") + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + } + return nil +} + +func (h *windowsAnsiEventHandler) Print(b byte) error { + if h.wrapNext { + h.buffer.WriteByte(h.marginByte) + h.clearWrap() + if _, err := h.simulateLF(true); err != nil { + return err + } + } + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X == info.Size.X-1 { + h.wrapNext = true + h.marginByte = b + } else { + pos.X++ + h.updatePos(pos) + h.buffer.WriteByte(b) + } + return nil +} + +func (h *windowsAnsiEventHandler) Execute(b byte) error { + switch b { + case ansiterm.ANSI_TAB: + h.logf("Execute(TAB)") + // Move to the next tab stop, but preserve auto-wrap if already set. + if !h.wrapNext { + pos, info, err := h.getCurrentInfo() + if err != nil { + return err + } + pos.X = (pos.X + 8) - pos.X%8 + if pos.X >= info.Size.X { + pos.X = info.Size.X - 1 + } + if err := h.Flush(); err != nil { + return err + } + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + } + return nil + + case ansiterm.ANSI_BEL: + h.buffer.WriteByte(ansiterm.ANSI_BEL) + return nil + + case ansiterm.ANSI_BACKSPACE: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X > 0 { + pos.X-- + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) + } + return nil + + case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: + // Treat as true LF. + return h.executeLF() + + case ansiterm.ANSI_LINE_FEED: + // Simulate a CR and LF for now since there is no way in go-ansiterm + // to tell if the LF should include CR (and more things break when it's + // missing than when it's incorrectly added). 
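+ // Fall back to writing a raw LF byte below only when simulateLF reports the event was not handled.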
+ handled, err := h.simulateLF(true) + if handled || err != nil { + return err + } + return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) + + case ansiterm.ANSI_CARRIAGE_RETURN: + if h.wrapNext { + if err := h.Flush(); err != nil { + return err + } + h.clearWrap() + } + pos, _, err := h.getCurrentInfo() + if err != nil { + return err + } + if pos.X != 0 { + pos.X = 0 + h.updatePos(pos) + h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) + } + return nil + + default: + return nil + } +} + +func (h *windowsAnsiEventHandler) CUU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(-param) +} + +func (h *windowsAnsiEventHandler) CUD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorVertical(param) +} + +func (h *windowsAnsiEventHandler) CUF(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUF: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(param) +} + +func (h *windowsAnsiEventHandler) CUB(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUB: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorHorizontal(-param) +} + +func (h *windowsAnsiEventHandler) CNL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CNL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(param) +} + +func (h *windowsAnsiEventHandler) CPL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CPL: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorLine(-param) +} + +func (h *windowsAnsiEventHandler) CHA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CHA: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.moveCursorColumn(param) +} + +func (h *windowsAnsiEventHandler) VPA(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("VPA: [[%d]]", param) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + window := h.getCursorWindow(info) + position := info.CursorPosition + position.Y = window.Top + int16(param) - 1 + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) CUP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("CUP: [[%d %d]]", row, col) + h.clearWrap() + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + window := h.getCursorWindow(info) + position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} + return h.setCursorPosition(position, window) +} + +func (h *windowsAnsiEventHandler) HVP(row int, col int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("HVP: [[%d %d]]", row, col) + h.clearWrap() + return h.CUP(row, col) +} + +func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) + h.clearWrap() + return nil +} + +func (h *windowsAnsiEventHandler) DECOM(enable bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECOM: [%v]", []string{strconv.FormatBool(enable)}) + h.clearWrap() + h.originMode = enable + return h.CUP(1, 1) +} + +func (h 
*windowsAnsiEventHandler) DECCOLM(use132 bool) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) + h.clearWrap() + if err := h.ED(2); err != nil { + return err + } + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + targetWidth := int16(80) + if use132 { + targetWidth = 132 + } + if info.Size.X < targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + window := info.Window + window.Left = 0 + window.Right = targetWidth - 1 + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + h.logf("set window failed: %v", err) + return err + } + if info.Size.X > targetWidth { + if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { + h.logf("set buffer failed: %v", err) + return err + } + } + return SetConsoleCursorPosition(h.fd, COORD{0, 0}) +} + +func (h *windowsAnsiEventHandler) ED(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ED: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + + // [J -- Erases from the cursor to the end of the screen, including the cursor position. + // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. + // [2J -- Erases the complete display. The cursor does not move. + // Notes: + // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles + + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X - 1, info.Size.Y - 1} + + case 1: + start = COORD{0, 0} + end = info.CursorPosition + + case 2: + start = COORD{0, 0} + end = COORD{info.Size.X - 1, info.Size.Y - 1} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + // If the whole buffer was cleared, move the window to the top while preserving + // the window-relative cursor position. + if param == 2 { + pos := info.CursorPosition + window := info.Window + pos.Y -= window.Top + window.Bottom -= window.Top + window.Top = 0 + if err := SetConsoleCursorPosition(h.fd, pos); err != nil { + return err + } + if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { + return err + } + } + + return nil +} + +func (h *windowsAnsiEventHandler) EL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("EL: [%v]", strconv.Itoa(param)) + h.clearWrap() + + // [K -- Erases from the cursor to the end of the line, including the cursor position. + // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. + // [2K -- Erases the complete line. 
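+ // As with ED above, erased cells are filled using the current text attributes.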
+ + info, err := GetConsoleScreenBufferInfo(h.fd) + if err != nil { + return err + } + + var start COORD + var end COORD + + switch param { + case 0: + start = info.CursorPosition + end = COORD{info.Size.X, info.CursorPosition.Y} + + case 1: + start = COORD{0, info.CursorPosition.Y} + end = info.CursorPosition + + case 2: + start = COORD{0, info.CursorPosition.Y} + end = COORD{info.Size.X, info.CursorPosition.Y} + } + + err = h.clearRange(h.attributes, start, end) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) IL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("IL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertLines(param) +} + +func (h *windowsAnsiEventHandler) DL(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DL: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteLines(param) +} + +func (h *windowsAnsiEventHandler) ICH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("ICH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.insertCharacters(param) +} + +func (h *windowsAnsiEventHandler) DCH(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DCH: [%v]", strconv.Itoa(param)) + h.clearWrap() + return h.deleteCharacters(param) +} + +func (h *windowsAnsiEventHandler) SGR(params []int) error { + if err := h.Flush(); err != nil { + return err + } + strings := []string{} + for _, v := range params { + strings = append(strings, strconv.Itoa(v)) + } + + h.logf("SGR: [%v]", strings) + + if len(params) <= 0 { + h.attributes = h.infoReset.Attributes + h.inverted = false + } else { + for _, attr := range params { + + if attr == ansiterm.ANSI_SGR_RESET { + h.attributes = h.infoReset.Attributes + h.inverted = false + continue + } + + h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) + } + } + + attributes := h.attributes + if h.inverted { + attributes = invertAttributes(attributes) + } + err := SetConsoleTextAttribute(h.fd, attributes) + if err != nil { + return err + } + + return nil +} + +func (h *windowsAnsiEventHandler) SU(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SU: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollUp(param) +} + +func (h *windowsAnsiEventHandler) SD(param int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("SD: [%v]", []string{strconv.Itoa(param)}) + h.clearWrap() + return h.scrollDown(param) +} + +func (h *windowsAnsiEventHandler) DA(params []string) error { + h.logf("DA: [%v]", params) + // DA cannot be implemented because it must send data on the VT100 input stream, + // which is not available to go-ansiterm. + return nil +} + +func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { + if err := h.Flush(); err != nil { + return err + } + h.logf("DECSTBM: [%d, %d]", top, bottom) + + // Windows is 0 indexed, Linux is 1 indexed + h.sr.top = int16(top - 1) + h.sr.bottom = int16(bottom - 1) + + // This command also moves the cursor to the origin. 
+	h.clearWrap()
+	return h.CUP(1, 1)
+}
+
+func (h *windowsAnsiEventHandler) RI() error {
+	if err := h.Flush(); err != nil {
+		return err
+	}
+	h.logf("RI: []")
+	h.clearWrap()
+
+	info, err := GetConsoleScreenBufferInfo(h.fd)
+	if err != nil {
+		return err
+	}
+
+	sr := h.effectiveSr(info.Window)
+	if info.CursorPosition.Y == sr.top {
+		return h.scrollDown(1)
+	}
+
+	return h.moveCursorVertical(-1)
+}
+
+func (h *windowsAnsiEventHandler) IND() error {
+	h.logf("IND: []")
+	return h.executeLF()
+}
+
+func (h *windowsAnsiEventHandler) Flush() error {
+	h.curInfo = nil
+	if h.buffer.Len() > 0 {
+		h.logf("Flush: [%s]", h.buffer.Bytes())
+		if _, err := h.buffer.WriteTo(h.file); err != nil {
+			return err
+		}
+	}
+
+	if h.wrapNext && !h.drewMarginByte {
+		h.logf("Flush: drawing margin byte '%c'", h.marginByte)
+
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return err
+		}
+
+		charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}}
+		size := COORD{1, 1}
+		position := COORD{0, 0}
+		region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y}
+		if err := WriteConsoleOutput(h.fd, charInfo, size, position, &region); err != nil {
+			return err
+		}
+		h.drewMarginByte = true
+	}
+	return nil
+}
+
+// getCurrentInfo ensures that the current console screen information has been
+// queried since the last call to Flush(). It must be called before accessing
+// h.curInfo or h.curPos.
+func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) {
+	if h.curInfo == nil {
+		info, err := GetConsoleScreenBufferInfo(h.fd)
+		if err != nil {
+			return COORD{}, nil, err
+		}
+		h.curInfo = info
+		h.curPos = info.CursorPosition
+	}
+	return h.curPos, h.curInfo, nil
+}
+
+func (h *windowsAnsiEventHandler) updatePos(pos COORD) {
+	if h.curInfo == nil {
+		panic("failed to call getCurrentInfo before calling updatePos")
+	}
+	h.curPos = pos
+}
+
+// clearWrap clears the state where the cursor is in the margin
+// waiting for the next character before wrapping the line. This must
+// be done before most operations that act on the cursor.
+func (h *windowsAnsiEventHandler) clearWrap() {
+	h.wrapNext = false
+	h.drewMarginByte = false
+}
diff --git a/vendor/github.com/BurntSushi/toml/.gitignore b/vendor/github.com/BurntSushi/toml/.gitignore
new file mode 100644
index 0000000000..cd11be9653
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/.gitignore
@@ -0,0 +1,2 @@
+toml.test
+/toml-test
diff --git a/vendor/github.com/BurntSushi/toml/COMPATIBLE b/vendor/github.com/BurntSushi/toml/COMPATIBLE
new file mode 100644
index 0000000000..f621b01196
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/COMPATIBLE
@@ -0,0 +1 @@
+Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0).
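Before moving on to the vendored TOML package, it may help to see how the winterm event handler above is driven in practice. The sketch below is an assumption based on the upstream Azure/go-ansiterm README rather than anything in this PR: it wires the handler to a console and pushes ANSI-encoded bytes through the package's state-machine parser, which dispatches calls such as ED(2) and CUP(1, 1) back into the handler shown above.

```go
//go:build windows

package main

import (
	"os"

	"github.com/Azure/go-ansiterm"
	"github.com/Azure/go-ansiterm/winterm"
)

func main() {
	// Assumption: os.Stdout is a real Windows console, so the winterm
	// handler can query its screen buffer.
	handler := winterm.CreateWinEventHandler(os.Stdout.Fd(), os.Stdout)
	parser := ansiterm.CreateParser("Ground", handler)

	// Clear the screen, home the cursor, and print some text. The parser
	// translates "\x1b[2J" into ED(2) and "\x1b[1;1H" into CUP(1, 1).
	if _, err := parser.Parse([]byte("\x1b[2J\x1b[1;1Hhello\r\n")); err != nil {
		panic(err)
	}
}
```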
diff --git a/vendor/github.com/BurntSushi/toml/COPYING b/vendor/github.com/BurntSushi/toml/COPYING new file mode 100644 index 0000000000..01b5743200 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/COPYING @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 TOML authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/BurntSushi/toml/README.md b/vendor/github.com/BurntSushi/toml/README.md new file mode 100644 index 0000000000..64410cf755 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/README.md @@ -0,0 +1,220 @@ +## TOML parser and encoder for Go with reflection + +TOML stands for Tom's Obvious, Minimal Language. This Go package provides a +reflection interface similar to Go's standard library `json` and `xml` +packages. This package also supports the `encoding.TextUnmarshaler` and +`encoding.TextMarshaler` interfaces so that you can define custom data +representations. (There is an example of this below.) + +Compatible with TOML version [v1.0.0](https://toml.io/en/v1.0.0). + +Documentation: https://godocs.io/github.com/BurntSushi/toml + +See the [releases page](https://github.com/BurntSushi/toml/releases) for a +changelog; this information is also in the git tag annotations (e.g. `git show +v0.4.0`). + +This library requires Go 1.13 or newer; install it with: + + $ go get github.com/BurntSushi/toml + +It also comes with a TOML validator CLI tool: + + $ go get github.com/BurntSushi/toml/cmd/tomlv + $ tomlv some-toml-file.toml + +### Testing + +This package passes all tests in +[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder +and the encoder. + +### Examples + +This package works similarly to how the Go standard library handles XML and +JSON. Namely, data is loaded into Go values via reflection. 
+
+For the simplest example, consider some TOML file as just a list of keys
+and values:
+
+```toml
+Age = 25
+Cats = [ "Cauchy", "Plato" ]
+Pi = 3.14
+Perfection = [ 6, 28, 496, 8128 ]
+DOB = 1987-07-05T05:45:00Z
+```
+
+Which could be defined in Go as:
+
+```go
+type Config struct {
+	Age        int
+	Cats       []string
+	Pi         float64
+	Perfection []int
+	DOB        time.Time // requires `import "time"`
+}
+```
+
+And then decoded with:
+
+```go
+var conf Config
+if _, err := toml.Decode(tomlData, &conf); err != nil {
+	// handle error
+}
+```
+
+You can also use struct tags if your struct field name doesn't map to a TOML
+key value directly:
+
+```toml
+some_key_NAME = "wat"
+```
+
+```go
+type TOML struct {
+	ObscureKey string `toml:"some_key_NAME"`
+}
+```
+
+Beware that, like most other decoders, **only exported fields** are
+considered when encoding and decoding; private fields are silently ignored.
+
+### Using the `encoding.TextUnmarshaler` interface
+
+Here's an example that automatically parses duration strings into
+`time.Duration` values:
+
+```toml
+[[song]]
+name = "Thunder Road"
+duration = "4m49s"
+
+[[song]]
+name = "Stairway to Heaven"
+duration = "8m03s"
+```
+
+Which can be decoded with:
+
+```go
+type song struct {
+	Name     string
+	Duration duration
+}
+type songs struct {
+	Song []song
+}
+var favorites songs
+if _, err := toml.Decode(blob, &favorites); err != nil {
+	log.Fatal(err)
+}
+
+for _, s := range favorites.Song {
+	fmt.Printf("%s (%s)\n", s.Name, s.Duration)
+}
+```
+
+And you'll also need a `duration` type that satisfies the
+`encoding.TextUnmarshaler` interface:
+
+```go
+type duration struct {
+	time.Duration
+}
+
+func (d *duration) UnmarshalText(text []byte) error {
+	var err error
+	d.Duration, err = time.ParseDuration(string(text))
+	return err
+}
+```
+
+To target TOML specifically you can implement the `UnmarshalTOML` interface in
+a similar way.
+
+### More complex usage
+
+Here's an example of how to load the example from the official spec page:
+
+```toml
+# This is a TOML document. Boom.
+
+title = "TOML Example"
+
+[owner]
+name = "Tom Preston-Werner"
+organization = "GitHub"
+bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
+dob = 1979-05-27T07:32:00Z # First class dates? Why not?
+
+[database]
+server = "192.168.1.1"
+ports = [ 8001, 8001, 8002 ]
+connection_max = 5000
+enabled = true
+
+[servers]
+
+  # You can indent as you please. Tabs or spaces. TOML don't care.
+  [servers.alpha]
+  ip = "10.0.0.1"
+  dc = "eqdc10"
+
+  [servers.beta]
+  ip = "10.0.0.2"
+  dc = "eqdc10"
+
+[clients]
+data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
+
+# Line breaks are OK when inside arrays
+hosts = [
+  "alpha",
+  "omega"
+]
+```
+
+And the corresponding Go types are:
+
+```go
+type tomlConfig struct {
+	Title   string
+	Owner   ownerInfo
+	DB      database `toml:"database"`
+	Servers map[string]server
+	Clients clients
+}
+
+type ownerInfo struct {
+	Name string
+	Org  string `toml:"organization"`
+	Bio  string
+	DOB  time.Time
+}
+
+type database struct {
+	Server  string
+	Ports   []int
+	ConnMax int `toml:"connection_max"`
+	Enabled bool
+}
+
+type server struct {
+	IP string
+	DC string
+}
+
+type clients struct {
+	Data  [][]interface{}
+	Hosts []string
+}
+```
+
+Note that a case-insensitive match will be tried if an exact match can't be
+found.
+
+A working example of the above can be found in `_examples/example.{go,toml}`.
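The README above mentions `UnmarshalTOML` only in passing. Since the vendored `decode.go` below defines `Unmarshaler` as `UnmarshalTOML(interface{}) error`, here is a hedged sketch of what an implementation can look like; the `address` type and the `server` table are invented for illustration, and the `map[string]interface{}` and `int64` shapes reflect how this decoder represents parsed tables and integers.

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

// address is a hypothetical type used only for this example.
type address struct {
	Host string
	Port int
}

// UnmarshalTOML receives the already-parsed TOML value. Tables arrive as
// map[string]interface{} and TOML integers as int64.
func (a *address) UnmarshalTOML(v interface{}) error {
	m, ok := v.(map[string]interface{})
	if !ok {
		return fmt.Errorf("address: expected table, got %T", v)
	}
	a.Host, _ = m["host"].(string)
	if p, ok := m["port"].(int64); ok {
		a.Port = int(p)
	}
	return nil
}

func main() {
	var cfg struct{ Server address }
	if _, err := toml.Decode("[server]\nhost = \"localhost\"\nport = 8080\n", &cfg); err != nil {
		fmt.Println(err)
		return
	}
	fmt.Printf("%+v\n", cfg.Server) // {Host:localhost Port:8080}
}
```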
+ diff --git a/vendor/github.com/BurntSushi/toml/decode.go b/vendor/github.com/BurntSushi/toml/decode.go new file mode 100644 index 0000000000..d3d3b8397c --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode.go @@ -0,0 +1,511 @@ +package toml + +import ( + "encoding" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "reflect" + "strings" + "time" +) + +// Unmarshaler is the interface implemented by objects that can unmarshal a +// TOML description of themselves. +type Unmarshaler interface { + UnmarshalTOML(interface{}) error +} + +// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. +func Unmarshal(p []byte, v interface{}) error { + _, err := Decode(string(p), v) + return err +} + +// Primitive is a TOML value that hasn't been decoded into a Go value. +// +// This type can be used for any value, which will cause decoding to be delayed. +// You can use the PrimitiveDecode() function to "manually" decode these values. +// +// NOTE: The underlying representation of a `Primitive` value is subject to +// change. Do not rely on it. +// +// NOTE: Primitive values are still parsed, so using them will only avoid the +// overhead of reflection. They can be useful when you don't know the exact type +// of TOML data until runtime. +type Primitive struct { + undecoded interface{} + context Key +} + +// PrimitiveDecode is just like the other `Decode*` functions, except it +// decodes a TOML value that has already been parsed. Valid primitive values +// can *only* be obtained from values filled by the decoder functions, +// including this method. (i.e., `v` may contain more `Primitive` +// values.) +// +// Meta data for primitive values is included in the meta data returned by +// the `Decode*` functions with one exception: keys returned by the Undecoded +// method will only reflect keys that were decoded. Namely, any keys hidden +// behind a Primitive will be considered undecoded. Executing this method will +// update the undecoded keys in the meta data. (See the example.) +func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { + md.context = primValue.context + defer func() { md.context = nil }() + return md.unify(primValue.undecoded, rvalue(v)) +} + +// Decoder decodes TOML data. +// +// TOML tables correspond to Go structs or maps (dealer's choice – they can be +// used interchangeably). +// +// TOML table arrays correspond to either a slice of structs or a slice of maps. +// +// TOML datetimes correspond to Go time.Time values. Local datetimes are parsed +// in the local timezone. +// +// All other TOML types (float, string, int, bool and array) correspond to the +// obvious Go types. +// +// An exception to the above rules is if a type implements the TextUnmarshaler +// interface, in which case any primitive TOML value (floats, strings, integers, +// booleans, datetimes) will be converted to a []byte and given to the value's +// UnmarshalText method. See the Unmarshaler example for a demonstration with +// time duration strings. +// +// Key mapping +// +// TOML keys can map to either keys in a Go map or field names in a Go struct. +// The special `toml` struct tag can be used to map TOML keys to struct fields +// that don't match the key name exactly (see the example). A case insensitive +// match to struct names will be tried if an exact match can't be found. +// +// The mapping between TOML values and Go values is loose. 
That is, there may
+// exist TOML values that cannot be placed into your representation, and there
+// may be parts of your representation that do not correspond to TOML values.
+// This loose mapping can be made stricter by using the IsDefined and/or
+// Undecoded methods on the MetaData returned.
+//
+// This decoder does not handle cyclic types. Decode will not terminate if a
+// cyclic type is passed.
+type Decoder struct {
+	r io.Reader
+}
+
+// NewDecoder creates a new Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{r: r}
+}
+
+// Decode TOML data into the pointer `v`.
+func (dec *Decoder) Decode(v interface{}) (MetaData, error) {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return MetaData{}, e("Decode of non-pointer %s", reflect.TypeOf(v))
+	}
+	if rv.IsNil() {
+		return MetaData{}, e("Decode of nil %s", reflect.TypeOf(v))
+	}
+
+	// TODO: have the parser read from io.Reader? Or at the very least, make
+	// it read from []byte rather than string
+	data, err := ioutil.ReadAll(dec.r)
+	if err != nil {
+		return MetaData{}, err
+	}
+
+	p, err := parse(string(data))
+	if err != nil {
+		return MetaData{}, err
+	}
+	md := MetaData{
+		p.mapping, p.types, p.ordered,
+		make(map[string]bool, len(p.ordered)), nil,
+	}
+	return md, md.unify(p.mapping, indirect(rv))
+}
+
+// Decode the TOML data into the pointer v.
+//
+// See the documentation on Decoder for a description of the decoding process.
+func Decode(data string, v interface{}) (MetaData, error) {
+	return NewDecoder(strings.NewReader(data)).Decode(v)
+}
+
+// DecodeFile is just like Decode, except it will automatically read the
+// contents of the file at path and decode it for you.
+func DecodeFile(path string, v interface{}) (MetaData, error) {
+	fp, err := os.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
+
+// unify performs a sort of type unification based on the structure of `rv`,
+// which is the client representation.
+//
+// Any type mismatch produces an error. Finding a type that we don't know
+// how to handle produces an unsupported type error.
+func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
+	// Special case. Look for a `Primitive` value.
+	// TODO: #76 would make this superfluous after implemented.
+	if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
+		// Save the undecoded data and the key context into the primitive
+		// value.
+		context := make(Key, len(md.context))
+		copy(context, md.context)
+		rv.Set(reflect.ValueOf(Primitive{
+			undecoded: data,
+			context:   context,
+		}))
+		return nil
+	}
+
+	// Special case. Unmarshaler Interface support.
+	if rv.CanAddr() {
+		if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
+			return v.UnmarshalTOML(data)
+		}
+	}
+
+	// Special case. Look for a value satisfying the TextUnmarshaler interface.
+	if v, ok := rv.Interface().(encoding.TextUnmarshaler); ok {
+		return md.unifyText(data, v)
+	}
+	// TODO:
+	// The behavior here is incorrect whenever a Go type satisfies the
+	// encoding.TextUnmarshaler interface but also corresponds to a TOML hash or
+	// array. In particular, the unmarshaler should only be applied to primitive
+	// TOML values. But at this point, it will be applied to all kinds of values
+	// and produce an incorrect error whenever those values are hashes or arrays
+	// (including arrays of tables).
+ + k := rv.Kind() + + // laziness + if k >= reflect.Int && k <= reflect.Uint64 { + return md.unifyInt(data, rv) + } + switch k { + case reflect.Ptr: + elem := reflect.New(rv.Type().Elem()) + err := md.unify(data, reflect.Indirect(elem)) + if err != nil { + return err + } + rv.Set(elem) + return nil + case reflect.Struct: + return md.unifyStruct(data, rv) + case reflect.Map: + return md.unifyMap(data, rv) + case reflect.Array: + return md.unifyArray(data, rv) + case reflect.Slice: + return md.unifySlice(data, rv) + case reflect.String: + return md.unifyString(data, rv) + case reflect.Bool: + return md.unifyBool(data, rv) + case reflect.Interface: + // we only support empty interfaces. + if rv.NumMethod() > 0 { + return e("unsupported type %s", rv.Type()) + } + return md.unifyAnything(data, rv) + case reflect.Float32: + fallthrough + case reflect.Float64: + return md.unifyFloat64(data, rv) + } + return e("unsupported type %s", rv.Kind()) +} + +func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if mapping == nil { + return nil + } + return e("type mismatch for %s: expected table but found %T", + rv.Type().String(), mapping) + } + + for key, datum := range tmap { + var f *field + fields := cachedTypeFields(rv.Type()) + for i := range fields { + ff := &fields[i] + if ff.name == key { + f = ff + break + } + if f == nil && strings.EqualFold(ff.name, key) { + f = ff + } + } + if f != nil { + subv := rv + for _, i := range f.index { + subv = indirect(subv.Field(i)) + } + if isUnifiable(subv) { + md.decoded[md.context.add(key).String()] = true + md.context = append(md.context, key) + if err := md.unify(datum, subv); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + } else if f.name != "" { + // Bad user! No soup for you! 
+ return e("cannot write unexported field %s.%s", + rv.Type().String(), f.name) + } + } + } + return nil +} + +func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { + if k := rv.Type().Key().Kind(); k != reflect.String { + return fmt.Errorf( + "toml: cannot decode to a map with non-string key type (%s in %q)", + k, rv.Type()) + } + + tmap, ok := mapping.(map[string]interface{}) + if !ok { + if tmap == nil { + return nil + } + return badtype("map", mapping) + } + if rv.IsNil() { + rv.Set(reflect.MakeMap(rv.Type())) + } + for k, v := range tmap { + md.decoded[md.context.add(k).String()] = true + md.context = append(md.context, k) + + rvkey := indirect(reflect.New(rv.Type().Key())) + rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) + if err := md.unify(v, rvval); err != nil { + return err + } + md.context = md.context[0 : len(md.context)-1] + + rvkey.SetString(k) + rv.SetMapIndex(rvkey, rvval) + } + return nil +} + +func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + if l := datav.Len(); l != rv.Len() { + return e("expected array length %d; got TOML array of length %d", rv.Len(), l) + } + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { + datav := reflect.ValueOf(data) + if datav.Kind() != reflect.Slice { + if !datav.IsValid() { + return nil + } + return badtype("slice", data) + } + n := datav.Len() + if rv.IsNil() || rv.Cap() < n { + rv.Set(reflect.MakeSlice(rv.Type(), n, n)) + } + rv.SetLen(n) + return md.unifySliceArray(datav, rv) +} + +func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { + l := data.Len() + for i := 0; i < l; i++ { + err := md.unify(data.Index(i).Interface(), indirect(rv.Index(i))) + if err != nil { + return err + } + } + return nil +} + +func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { + if _, ok := data.(time.Time); ok { + rv.Set(reflect.ValueOf(data)) + return nil + } + return badtype("time.Time", data) +} + +func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { + if s, ok := data.(string); ok { + rv.SetString(s) + return nil + } + return badtype("string", data) +} + +func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { + if num, ok := data.(float64); ok { + switch rv.Kind() { + case reflect.Float32: + fallthrough + case reflect.Float64: + rv.SetFloat(num) + default: + panic("bug") + } + return nil + } + return badtype("float", data) +} + +func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { + if num, ok := data.(int64); ok { + if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { + switch rv.Kind() { + case reflect.Int, reflect.Int64: + // No bounds checking necessary. + case reflect.Int8: + if num < math.MinInt8 || num > math.MaxInt8 { + return e("value %d is out of range for int8", num) + } + case reflect.Int16: + if num < math.MinInt16 || num > math.MaxInt16 { + return e("value %d is out of range for int16", num) + } + case reflect.Int32: + if num < math.MinInt32 || num > math.MaxInt32 { + return e("value %d is out of range for int32", num) + } + } + rv.SetInt(num) + } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { + unum := uint64(num) + switch rv.Kind() { + case reflect.Uint, reflect.Uint64: + // No bounds checking necessary. 
+ case reflect.Uint8: + if num < 0 || unum > math.MaxUint8 { + return e("value %d is out of range for uint8", num) + } + case reflect.Uint16: + if num < 0 || unum > math.MaxUint16 { + return e("value %d is out of range for uint16", num) + } + case reflect.Uint32: + if num < 0 || unum > math.MaxUint32 { + return e("value %d is out of range for uint32", num) + } + } + rv.SetUint(unum) + } else { + panic("unreachable") + } + return nil + } + return badtype("integer", data) +} + +func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { + if b, ok := data.(bool); ok { + rv.SetBool(b) + return nil + } + return badtype("boolean", data) +} + +func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { + rv.Set(reflect.ValueOf(data)) + return nil +} + +func (md *MetaData) unifyText(data interface{}, v encoding.TextUnmarshaler) error { + var s string + switch sdata := data.(type) { + case TextMarshaler: + text, err := sdata.MarshalText() + if err != nil { + return err + } + s = string(text) + case fmt.Stringer: + s = sdata.String() + case string: + s = sdata + case bool: + s = fmt.Sprintf("%v", sdata) + case int64: + s = fmt.Sprintf("%d", sdata) + case float64: + s = fmt.Sprintf("%f", sdata) + default: + return badtype("primitive (string-like)", data) + } + if err := v.UnmarshalText([]byte(s)); err != nil { + return err + } + return nil +} + +// rvalue returns a reflect.Value of `v`. All pointers are resolved. +func rvalue(v interface{}) reflect.Value { + return indirect(reflect.ValueOf(v)) +} + +// indirect returns the value pointed to by a pointer. +// Pointers are followed until the value is not a pointer. +// New values are allocated for each nil pointer. +// +// An exception to this rule is if the value satisfies an interface of +// interest to us (like encoding.TextUnmarshaler). +func indirect(v reflect.Value) reflect.Value { + if v.Kind() != reflect.Ptr { + if v.CanSet() { + pv := v.Addr() + if _, ok := pv.Interface().(encoding.TextUnmarshaler); ok { + return pv + } + } + return v + } + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + return indirect(reflect.Indirect(v)) +} + +func isUnifiable(rv reflect.Value) bool { + if rv.CanSet() { + return true + } + if _, ok := rv.Interface().(encoding.TextUnmarshaler); ok { + return true + } + return false +} + +func e(format string, args ...interface{}) error { + return fmt.Errorf("toml: "+format, args...) +} + +func badtype(expected string, data interface{}) error { + return e("cannot load TOML value of type %T into a Go %s", data, expected) +} diff --git a/vendor/github.com/BurntSushi/toml/decode_go116.go b/vendor/github.com/BurntSushi/toml/decode_go116.go new file mode 100644 index 0000000000..38aa75fdce --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/decode_go116.go @@ -0,0 +1,18 @@ +// +build go1.16 + +package toml + +import ( + "io/fs" +) + +// DecodeFS is just like Decode, except it will automatically read the contents +// of the file at `path` from a fs.FS instance. 
+func DecodeFS(fsys fs.FS, path string, v interface{}) (MetaData, error) {
+	fp, err := fsys.Open(path)
+	if err != nil {
+		return MetaData{}, err
+	}
+	defer fp.Close()
+	return NewDecoder(fp).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/decode_meta.go b/vendor/github.com/BurntSushi/toml/decode_meta.go
new file mode 100644
index 0000000000..ad8899c6cf
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/decode_meta.go
@@ -0,0 +1,123 @@
+package toml
+
+import "strings"
+
+// MetaData allows access to meta information about TOML data that may not be
+// inferable via reflection. In particular, whether a key has been defined and
+// the TOML type of a key.
+type MetaData struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	keys    []Key
+	decoded map[string]bool
+	context Key // Used only during decoding.
+}
+
+// IsDefined reports if the key exists in the TOML data.
+//
+// The key should be specified hierarchically, for example to access the TOML
+// key "a.b.c" you would use:
+//
+//   IsDefined("a", "b", "c")
+//
+// IsDefined will return false if an empty key is given. Keys are case
+// sensitive.
+func (md *MetaData) IsDefined(key ...string) bool {
+	if len(key) == 0 {
+		return false
+	}
+
+	var hash map[string]interface{}
+	var ok bool
+	var hashOrVal interface{} = md.mapping
+	for _, k := range key {
+		if hash, ok = hashOrVal.(map[string]interface{}); !ok {
+			return false
+		}
+		if hashOrVal, ok = hash[k]; !ok {
+			return false
+		}
+	}
+	return true
+}
+
+// Type returns a string representation of the type of the key specified.
+//
+// Type will return the empty string if given an empty key or a key that does
+// not exist. Keys are case sensitive.
+func (md *MetaData) Type(key ...string) string {
+	fullkey := strings.Join(key, ".")
+	if typ, ok := md.types[fullkey]; ok {
+		return typ.typeString()
+	}
+	return ""
+}
+
+// Key represents any TOML key, including key groups. Use (MetaData).Keys to get
+// values of this type.
+type Key []string
+
+func (k Key) String() string { return strings.Join(k, ".") }
+
+func (k Key) maybeQuotedAll() string {
+	var ss []string
+	for i := range k {
+		ss = append(ss, k.maybeQuoted(i))
+	}
+	return strings.Join(ss, ".")
+}
+
+func (k Key) maybeQuoted(i int) string {
+	if k[i] == "" {
+		return `""`
+	}
+	quote := false
+	for _, c := range k[i] {
+		if !isBareKeyChar(c) {
+			quote = true
+			break
+		}
+	}
+	if quote {
+		return `"` + quotedReplacer.Replace(k[i]) + `"`
+	}
+	return k[i]
+}
+
+func (k Key) add(piece string) Key {
+	newKey := make(Key, len(k)+1)
+	copy(newKey, k)
+	newKey[len(k)] = piece
+	return newKey
+}
+
+// Keys returns a slice of every key in the TOML data, including key groups.
+//
+// Each key is itself a slice, where the first element is the top of the
+// hierarchy and the last is the most specific. The list will have the same
+// order as the keys appeared in the TOML data.
+//
+// All keys returned are non-empty.
+func (md *MetaData) Keys() []Key {
+	return md.keys
+}
+
+// Undecoded returns all keys that have not been decoded in the order in which
+// they appear in the original TOML document.
+//
+// This includes keys that haven't been decoded because of a Primitive value.
+// Once the Primitive value is decoded, the keys will be considered decoded.
+//
+// Also note that decoding into an empty interface will result in no decoding,
+// and so no keys will be considered decoded.
+//
+// In this sense, the Undecoded keys correspond to keys in the TOML document
+// that do not have a concrete type in your representation.
+func (md *MetaData) Undecoded() []Key {
+	undecoded := make([]Key, 0, len(md.keys))
+	for _, key := range md.keys {
+		if !md.decoded[key.String()] {
+			undecoded = append(undecoded, key)
+		}
+	}
+	return undecoded
+}
diff --git a/vendor/github.com/BurntSushi/toml/deprecated.go b/vendor/github.com/BurntSushi/toml/deprecated.go
new file mode 100644
index 0000000000..db89eac1d1
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/deprecated.go
@@ -0,0 +1,33 @@
+package toml
+
+import (
+	"encoding"
+	"io"
+)
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextMarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextMarshaler encoding.TextMarshaler
+
+// DEPRECATED!
+//
+// Use the identical encoding.TextUnmarshaler instead. It is defined here to
+// support Go 1.1 and older.
+type TextUnmarshaler encoding.TextUnmarshaler
+
+// DEPRECATED!
+//
+// Use MetaData.PrimitiveDecode instead.
+func PrimitiveDecode(primValue Primitive, v interface{}) error {
+	md := MetaData{decoded: make(map[string]bool)}
+	return md.unify(primValue.undecoded, rvalue(v))
+}
+
+// DEPRECATED!
+//
+// Use NewDecoder(reader).Decode(&v) instead.
+func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
+	return NewDecoder(r).Decode(v)
+}
diff --git a/vendor/github.com/BurntSushi/toml/doc.go b/vendor/github.com/BurntSushi/toml/doc.go
new file mode 100644
index 0000000000..099c4a77d2
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/doc.go
@@ -0,0 +1,13 @@
+/*
+Package toml implements decoding and encoding of TOML files.
+
+This package supports TOML v1.0.0, as listed on https://toml.io
+
+There is also support for delaying decoding with the Primitive type, and
+querying the set of keys in a TOML document with the MetaData type.
+
+The github.com/BurntSushi/toml/cmd/tomlv package implements a TOML validator,
+and can be used to verify if a TOML document is valid. It can also be used to
+print the type of each key.
+*/
+package toml
diff --git a/vendor/github.com/BurntSushi/toml/encode.go b/vendor/github.com/BurntSushi/toml/encode.go
new file mode 100644
index 0000000000..10d88ac632
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/encode.go
@@ -0,0 +1,650 @@
+package toml
+
+import (
+	"bufio"
+	"encoding"
+	"errors"
+	"fmt"
+	"io"
+	"math"
+	"reflect"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type tomlEncodeError struct{ error }
+
+var (
+	errArrayNilElement = errors.New("toml: cannot encode array with nil element")
+	errNonString       = errors.New("toml: cannot encode a map with non-string key type")
+	errAnonNonStruct   = errors.New("toml: cannot encode an anonymous field that is not a struct")
+	errNoKey           = errors.New("toml: top-level values must be Go maps or structs")
+	errAnything        = errors.New("") // used in testing
+)
+
+var quotedReplacer = strings.NewReplacer(
+	"\"", "\\\"",
+	"\\", "\\\\",
+	"\x00", `\u0000`,
+	"\x01", `\u0001`,
+	"\x02", `\u0002`,
+	"\x03", `\u0003`,
+	"\x04", `\u0004`,
+	"\x05", `\u0005`,
+	"\x06", `\u0006`,
+	"\x07", `\u0007`,
+	"\b", `\b`,
+	"\t", `\t`,
+	"\n", `\n`,
+	"\x0b", `\u000b`,
+	"\f", `\f`,
+	"\r", `\r`,
+	"\x0e", `\u000e`,
+	"\x0f", `\u000f`,
+	"\x10", `\u0010`,
+	"\x11", `\u0011`,
+	"\x12", `\u0012`,
+	"\x13", `\u0013`,
+	"\x14", `\u0014`,
+	"\x15", `\u0015`,
+	"\x16", `\u0016`,
+	"\x17", `\u0017`,
+	"\x18", `\u0018`,
+	"\x19", `\u0019`,
+	"\x1a", `\u001a`,
+	"\x1b", `\u001b`,
+	"\x1c", `\u001c`,
+	"\x1d", `\u001d`,
+	"\x1e", `\u001e`,
+	"\x1f", `\u001f`,
+	"\x7f", `\u007f`,
+)
+
+// Encoder encodes a Go value to a TOML document.
+//
+// The mapping between Go values and TOML values should be precisely the same as
+// for the Decode* functions. Similarly, the TextMarshaler interface is
+// supported by encoding the resulting bytes as strings. If you want to write
+// arbitrary binary data then you will need to use something like base64 since
+// TOML does not have any binary types.
+//
+// When encoding TOML hashes (Go maps or structs), keys without any sub-hashes
+// are encoded first.
+//
+// Go maps will be sorted alphabetically by key for deterministic output.
+//
+// Encoding Go values without a corresponding TOML representation will return an
+// error. Examples of this include maps with non-string keys, slices with nil
+// elements, embedded non-struct types, and nested slices containing maps or
+// structs. (e.g. [][]map[string]string is not allowed but []map[string]string
+// is okay, as is []map[string][]string).
+//
+// NOTE: Only exported keys are encoded due to the use of reflection. Unexported
+// keys are silently discarded.
+type Encoder struct {
+	// The string to use for a single indentation level. The default is two
+	// spaces.
+	Indent string
+
+	// hasWritten is whether we have written any output to w yet.
+	hasWritten bool
+	w          *bufio.Writer
+}
+
+// NewEncoder creates a new Encoder.
+func NewEncoder(w io.Writer) *Encoder {
+	return &Encoder{
+		w:      bufio.NewWriter(w),
+		Indent: "  ",
+	}
+}
+
+// Encode writes a TOML representation of the Go value to the Encoder's writer.
+//
+// An error is returned if the value given cannot be encoded to a valid TOML
+// document.
+func (enc *Encoder) Encode(v interface{}) error {
+	rv := eindirect(reflect.ValueOf(v))
+	if err := enc.safeEncode(Key([]string{}), rv); err != nil {
+		return err
+	}
+	return enc.w.Flush()
+}
+
+func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			if terr, ok := r.(tomlEncodeError); ok {
+				err = terr.error
+				return
+			}
+			panic(r)
+		}
+	}()
+	enc.encode(key, rv)
+	return nil
+}
+
+func (enc *Encoder) encode(key Key, rv reflect.Value) {
+	// Special case. Time needs to be in ISO8601 format.
+	// Special case. If we can marshal the type to text, then we use that.
+	// Basically, this prevents the encoder from handling these types as
+	// generic structs (or whatever the underlying type of a TextMarshaler is).
+	switch t := rv.Interface().(type) {
+	case time.Time, encoding.TextMarshaler:
+		enc.writeKeyValue(key, rv, false)
+		return
+	// TODO: #76 would make this superfluous after implemented.
+	case Primitive:
+		enc.encode(key, reflect.ValueOf(t.undecoded))
+		return
+	}
+
+	k := rv.Kind()
+	switch k {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64,
+		reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
+		enc.writeKeyValue(key, rv, false)
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
+			enc.eArrayOfTables(key, rv)
+		} else {
+			enc.writeKeyValue(key, rv, false)
+		}
+	case reflect.Interface:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Map:
+		if rv.IsNil() {
+			return
+		}
+		enc.eTable(key, rv)
+	case reflect.Ptr:
+		if rv.IsNil() {
+			return
+		}
+		enc.encode(key, rv.Elem())
+	case reflect.Struct:
+		enc.eTable(key, rv)
+	default:
+		encPanic(fmt.Errorf("unsupported type for key '%s': %s", key, k))
+	}
+}
+
+// eElement encodes any value that can be an array element.
+func (enc *Encoder) eElement(rv reflect.Value) {
+	switch v := rv.Interface().(type) {
+	case time.Time: // Using TextMarshaler adds extra quotes, which we don't want.
+		format := time.RFC3339Nano
+		switch v.Location() {
+		case internal.LocalDatetime:
+			format = "2006-01-02T15:04:05.999999999"
+		case internal.LocalDate:
+			format = "2006-01-02"
+		case internal.LocalTime:
+			format = "15:04:05.999999999"
+		}
+		switch v.Location() {
+		default:
+			enc.wf(v.Format(format))
+		case internal.LocalDatetime, internal.LocalDate, internal.LocalTime:
+			enc.wf(v.In(time.UTC).Format(format))
+		}
+		return
+	case encoding.TextMarshaler:
+		// Use text marshaler if it's available for this value.
+ if s, err := v.MarshalText(); err != nil { + encPanic(err) + } else { + enc.writeQuoted(string(s)) + } + return + } + + switch rv.Kind() { + case reflect.String: + enc.writeQuoted(rv.String()) + case reflect.Bool: + enc.wf(strconv.FormatBool(rv.Bool())) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + enc.wf(strconv.FormatInt(rv.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + enc.wf(strconv.FormatUint(rv.Uint(), 10)) + case reflect.Float32: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 32))) + } + case reflect.Float64: + f := rv.Float() + if math.IsNaN(f) { + enc.wf("nan") + } else if math.IsInf(f, 0) { + enc.wf("%cinf", map[bool]byte{true: '-', false: '+'}[math.Signbit(f)]) + } else { + enc.wf(floatAddDecimal(strconv.FormatFloat(f, 'f', -1, 64))) + } + case reflect.Array, reflect.Slice: + enc.eArrayOrSliceElement(rv) + case reflect.Struct: + enc.eStruct(nil, rv, true) + case reflect.Map: + enc.eMap(nil, rv, true) + case reflect.Interface: + enc.eElement(rv.Elem()) + default: + encPanic(fmt.Errorf("unexpected primitive type: %T", rv.Interface())) + } +} + +// By the TOML spec, all floats must have a decimal with at least one number on +// either side. +func floatAddDecimal(fstr string) string { + if !strings.Contains(fstr, ".") { + return fstr + ".0" + } + return fstr +} + +func (enc *Encoder) writeQuoted(s string) { + enc.wf("\"%s\"", quotedReplacer.Replace(s)) +} + +func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { + length := rv.Len() + enc.wf("[") + for i := 0; i < length; i++ { + elem := rv.Index(i) + enc.eElement(elem) + if i != length-1 { + enc.wf(", ") + } + } + enc.wf("]") +} + +func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { + if len(key) == 0 { + encPanic(errNoKey) + } + for i := 0; i < rv.Len(); i++ { + trv := rv.Index(i) + if isNil(trv) { + continue + } + enc.newline() + enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + enc.eMapOrStruct(key, trv, false) + } +} + +func (enc *Encoder) eTable(key Key, rv reflect.Value) { + if len(key) == 1 { + // Output an extra newline between top-level tables. + // (The newline isn't written if nothing else has been written though.) + enc.newline() + } + if len(key) > 0 { + enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) + enc.newline() + } + enc.eMapOrStruct(key, rv, false) +} + +func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value, inline bool) { + switch rv := eindirect(rv); rv.Kind() { + case reflect.Map: + enc.eMap(key, rv, inline) + case reflect.Struct: + enc.eStruct(key, rv, inline) + default: + // Should never happen? + panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) + } +} + +func (enc *Encoder) eMap(key Key, rv reflect.Value, inline bool) { + rt := rv.Type() + if rt.Key().Kind() != reflect.String { + encPanic(errNonString) + } + + // Sort keys so that we have deterministic output. And write keys directly + // underneath this key first, before writing sub-structs or sub-maps. 
+	var mapKeysDirect, mapKeysSub []string
+	for _, mapKey := range rv.MapKeys() {
+		k := mapKey.String()
+		if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
+			mapKeysSub = append(mapKeysSub, k)
+		} else {
+			mapKeysDirect = append(mapKeysDirect, k)
+		}
+	}
+
+	var writeMapKeys = func(mapKeys []string, trailC bool) {
+		sort.Strings(mapKeys)
+		for i, mapKey := range mapKeys {
+			val := rv.MapIndex(reflect.ValueOf(mapKey))
+			if isNil(val) {
+				continue
+			}
+
+			if inline {
+				enc.writeKeyValue(Key{mapKey}, val, true)
+				if trailC || i != len(mapKeys)-1 {
+					enc.wf(", ")
+				}
+			} else {
+				enc.encode(key.add(mapKey), val)
+			}
+		}
+	}
+
+	if inline {
+		enc.wf("{")
+	}
+	writeMapKeys(mapKeysDirect, len(mapKeysSub) > 0)
+	writeMapKeys(mapKeysSub, false)
+	if inline {
+		enc.wf("}")
+	}
+}
+
+func (enc *Encoder) eStruct(key Key, rv reflect.Value, inline bool) {
+	// Write keys for fields directly under this key first, because if we write
+	// a field that creates a new table then all keys under it will be in that
+	// table (not the one we're writing here).
+	//
+	// Fields is a [][]int: for fieldsDirect this always has one entry (the
+	// struct index). For fieldsSub it contains two entries: the parent field
+	// index from tv, and the field indexes for the fields of the sub.
+	var (
+		rt                      = rv.Type()
+		fieldsDirect, fieldsSub [][]int
+		addFields               func(rt reflect.Type, rv reflect.Value, start []int)
+	)
+	addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
+		for i := 0; i < rt.NumField(); i++ {
+			f := rt.Field(i)
+			if f.PkgPath != "" && !f.Anonymous { /// Skip unexported fields.
+				continue
+			}
+
+			frv := rv.Field(i)
+
+			// Treat anonymous struct fields with tag names as though they are
+			// not anonymous, like encoding/json does.
+			//
+			// Non-struct anonymous fields use the normal encoding logic.
+			if f.Anonymous {
+				t := f.Type
+				switch t.Kind() {
+				case reflect.Struct:
+					if getOptions(f.Tag).name == "" {
+						addFields(t, frv, append(start, f.Index...))
+						continue
+					}
+				case reflect.Ptr:
+					if t.Elem().Kind() == reflect.Struct && getOptions(f.Tag).name == "" {
+						if !frv.IsNil() {
+							addFields(t.Elem(), frv.Elem(), append(start, f.Index...))
+						}
+						continue
+					}
+				}
+			}
+
+			if typeIsHash(tomlTypeOfGo(frv)) {
+				fieldsSub = append(fieldsSub, append(start, f.Index...))
+			} else {
+				fieldsDirect = append(fieldsDirect, append(start, f.Index...))
+			}
+		}
+	}
+	addFields(rt, rv, nil)
+
+	writeFields := func(fields [][]int) {
+		for _, fieldIndex := range fields {
+			fieldType := rt.FieldByIndex(fieldIndex)
+			fieldVal := rv.FieldByIndex(fieldIndex)
+
+			if isNil(fieldVal) { /// Don't write anything for nil fields.
+				continue
+			}
+
+			opts := getOptions(fieldType.Tag)
+			if opts.skip {
+				continue
+			}
+			keyName := fieldType.Name
+			if opts.name != "" {
+				keyName = opts.name
+			}
+			if opts.omitempty && isEmpty(fieldVal) {
+				continue
+			}
+			if opts.omitzero && isZero(fieldVal) {
+				continue
+			}
+
+			if inline {
+				enc.writeKeyValue(Key{keyName}, fieldVal, true)
+				if fieldIndex[0] != len(fields)-1 {
+					enc.wf(", ")
+				}
+			} else {
+				enc.encode(key.add(keyName), fieldVal)
+			}
+		}
+	}
+
+	if inline {
+		enc.wf("{")
+	}
+	writeFields(fieldsDirect)
+	writeFields(fieldsSub)
+	if inline {
+		enc.wf("}")
+	}
+}
+
+// tomlTypeOfGo returns the TOML type of a Go value.
+// The type may be `nil`, which means
+// no concrete TOML type could be found.
+func tomlTypeOfGo(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() {
+		return nil
+	}
+	switch rv.Kind() {
+	case reflect.Bool:
+		return tomlBool
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
+		reflect.Int64,
+		reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
+		reflect.Uint64:
+		return tomlInteger
+	case reflect.Float32, reflect.Float64:
+		return tomlFloat
+	case reflect.Array, reflect.Slice:
+		if typeEqual(tomlHash, tomlArrayType(rv)) {
+			return tomlArrayHash
+		}
+		return tomlArray
+	case reflect.Ptr, reflect.Interface:
+		return tomlTypeOfGo(rv.Elem())
+	case reflect.String:
+		return tomlString
+	case reflect.Map:
+		return tomlHash
+	case reflect.Struct:
+		switch rv.Interface().(type) {
+		case time.Time:
+			return tomlDatetime
+		case encoding.TextMarshaler:
+			return tomlString
+		default:
+			// Someone used a pointer receiver: we can make it work for pointer
+			// values.
+			if rv.CanAddr() {
+				_, ok := rv.Addr().Interface().(encoding.TextMarshaler)
+				if ok {
+					return tomlString
+				}
+			}
+			return tomlHash
+		}
+	default:
+		_, ok := rv.Interface().(encoding.TextMarshaler)
+		if ok {
+			return tomlString
+		}
+		encPanic(errors.New("unsupported type: " + rv.Kind().String()))
+		panic("") // Need *some* return value
+	}
+}
+
+// tomlArrayType returns the element type of a TOML array. The type returned
+// may be nil if it cannot be determined (e.g., a nil slice or a zero length
+// slice). This function may also panic if it finds a type that cannot be
+// expressed in TOML (such as nil elements, heterogeneous arrays or directly
+// nested arrays of tables).
+func tomlArrayType(rv reflect.Value) tomlType {
+	if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
+		return nil
+	}
+
+	/// Don't allow nil.
+	rvlen := rv.Len()
+	for i := 1; i < rvlen; i++ {
+		if tomlTypeOfGo(rv.Index(i)) == nil {
+			encPanic(errArrayNilElement)
+		}
+	}
+
+	firstType := tomlTypeOfGo(rv.Index(0))
+	if firstType == nil {
+		encPanic(errArrayNilElement)
+	}
+	return firstType
+}
+
+type tagOptions struct {
+	skip      bool // "-"
+	name      string
+	omitempty bool
+	omitzero  bool
+}
+
+func getOptions(tag reflect.StructTag) tagOptions {
+	t := tag.Get("toml")
+	if t == "-" {
+		return tagOptions{skip: true}
+	}
+	var opts tagOptions
+	parts := strings.Split(t, ",")
+	opts.name = parts[0]
+	for _, s := range parts[1:] {
+		switch s {
+		case "omitempty":
+			opts.omitempty = true
+		case "omitzero":
+			opts.omitzero = true
+		}
+	}
+	return opts
+}
+
+func isZero(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return rv.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
+		return rv.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return rv.Float() == 0.0
+	}
+	return false
+}
+
+func isEmpty(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
+		return rv.Len() == 0
+	case reflect.Bool:
+		return !rv.Bool()
+	}
+	return false
+}
+
+func (enc *Encoder) newline() {
+	if enc.hasWritten {
+		enc.wf("\n")
+	}
+}
+
+// Write a key/value pair:
+//
+//   key = <value>
+//
+// If inline is true it won't add a newline at the end.
+func (enc *Encoder) writeKeyValue(key Key, val reflect.Value, inline bool) {
+	if len(key) == 0 {
+		encPanic(errNoKey)
+	}
+	enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
+	enc.eElement(val)
+	if !inline {
+		enc.newline()
+	}
+}
+
+func (enc *Encoder) wf(format string, v ...interface{}) {
+	if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
+		encPanic(err)
+	}
+	enc.hasWritten = true
+}
+
+func (enc *Encoder) indentStr(key Key) string {
+	return strings.Repeat(enc.Indent, len(key)-1)
+}
+
+func encPanic(err error) {
+	panic(tomlEncodeError{err})
+}
+
+func eindirect(v reflect.Value) reflect.Value {
+	switch v.Kind() {
+	case reflect.Ptr, reflect.Interface:
+		return eindirect(v.Elem())
+	default:
+		return v
+	}
+}
+
+func isNil(rv reflect.Value) bool {
+	switch rv.Kind() {
+	case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+		return rv.IsNil()
+	default:
+		return false
+	}
+}
diff --git a/vendor/github.com/BurntSushi/toml/internal/tz.go b/vendor/github.com/BurntSushi/toml/internal/tz.go
new file mode 100644
index 0000000000..022f15bc2b
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/internal/tz.go
@@ -0,0 +1,36 @@
+package internal
+
+import "time"
+
+// Timezones used for local datetime, date, and time TOML types.
+//
+// The exact way times and dates without a timezone should be interpreted is not
+// well-defined in the TOML specification and left to the implementation. These
+// default to the current local timezone offset of the computer, but this can be
+// changed by setting these variables before decoding.
+//
+// TODO:
+// Ideally we'd like to offer people the ability to configure the used timezone
+// by setting Decoder.Timezone and Encoder.Timezone; however, this is a bit
+// tricky: the reason we use three different variables for this is to support
+// round-tripping – without these specific TZ names we wouldn't know which
+// format to use.
+//
+// There isn't a good way to encode this right now though, and passing this sort
+// of information also ties in to various related issues such as string format
+// encoding, encoding of comments, etc.
+//
+// So, for the time being, just put this in internal until we can write a good
+// comprehensive API for doing all of this.
+//
+// The reason they're exported is that they're referred to from e.g.
+// internal/tag.
+//
+// Note that this behaviour is valid according to the TOML spec as the exact
+// behaviour is left up to implementations.
+var ( + localOffset = func() int { _, o := time.Now().Zone(); return o }() + LocalDatetime = time.FixedZone("datetime-local", localOffset) + LocalDate = time.FixedZone("date-local", localOffset) + LocalTime = time.FixedZone("time-local", localOffset) +) diff --git a/vendor/github.com/BurntSushi/toml/lex.go b/vendor/github.com/BurntSushi/toml/lex.go new file mode 100644 index 0000000000..adc4eb5d53 --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/lex.go @@ -0,0 +1,1225 @@ +package toml + +import ( + "fmt" + "reflect" + "runtime" + "strings" + "unicode" + "unicode/utf8" +) + +type itemType int + +const ( + itemError itemType = iota + itemNIL // used in the parser to indicate no type + itemEOF + itemText + itemString + itemRawString + itemMultilineString + itemRawMultilineString + itemBool + itemInteger + itemFloat + itemDatetime + itemArray // the start of an array + itemArrayEnd + itemTableStart + itemTableEnd + itemArrayTableStart + itemArrayTableEnd + itemKeyStart + itemKeyEnd + itemCommentStart + itemInlineTableStart + itemInlineTableEnd +) + +const ( + eof = 0 + comma = ',' + tableStart = '[' + tableEnd = ']' + arrayTableStart = '[' + arrayTableEnd = ']' + tableSep = '.' + keySep = '=' + arrayStart = '[' + arrayEnd = ']' + commentStart = '#' + stringStart = '"' + stringEnd = '"' + rawStringStart = '\'' + rawStringEnd = '\'' + inlineTableStart = '{' + inlineTableEnd = '}' +) + +type stateFn func(lx *lexer) stateFn + +type lexer struct { + input string + start int + pos int + line int + state stateFn + items chan item + + // Allow for backing up up to four runes. + // This is necessary because TOML contains 3-rune tokens (""" and '''). + prevWidths [4]int + nprev int // how many of prevWidths are in use + // If we emit an eof, we can still back up, but it is not OK to call + // next again. + atEOF bool + + // A stack of state functions used to maintain context. + // The idea is to reuse parts of the state machine in various places. + // For example, values can appear at the top level or within arbitrarily + // nested arrays. The last state on the stack is used after a value has + // been lexed. Similarly for comments. 
+ stack []stateFn +} + +type item struct { + typ itemType + val string + line int +} + +func (lx *lexer) nextItem() item { + for { + select { + case item := <-lx.items: + return item + default: + lx.state = lx.state(lx) + //fmt.Printf(" STATE %-24s current: %-10q stack: %s\n", lx.state, lx.current(), lx.stack) + } + } +} + +func lex(input string) *lexer { + lx := &lexer{ + input: input, + state: lexTop, + line: 1, + items: make(chan item, 10), + stack: make([]stateFn, 0, 10), + } + return lx +} + +func (lx *lexer) push(state stateFn) { + lx.stack = append(lx.stack, state) +} + +func (lx *lexer) pop() stateFn { + if len(lx.stack) == 0 { + return lx.errorf("BUG in lexer: no states to pop") + } + last := lx.stack[len(lx.stack)-1] + lx.stack = lx.stack[0 : len(lx.stack)-1] + return last +} + +func (lx *lexer) current() string { + return lx.input[lx.start:lx.pos] +} + +func (lx *lexer) emit(typ itemType) { + lx.items <- item{typ, lx.current(), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) emitTrim(typ itemType) { + lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} + lx.start = lx.pos +} + +func (lx *lexer) next() (r rune) { + if lx.atEOF { + panic("BUG in lexer: next called after EOF") + } + if lx.pos >= len(lx.input) { + lx.atEOF = true + return eof + } + + if lx.input[lx.pos] == '\n' { + lx.line++ + } + lx.prevWidths[3] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[0] + if lx.nprev < 4 { + lx.nprev++ + } + + r, w := utf8.DecodeRuneInString(lx.input[lx.pos:]) + if r == utf8.RuneError { + lx.errorf("invalid UTF-8 byte at position %d (line %d): 0x%02x", lx.pos, lx.line, lx.input[lx.pos]) + return utf8.RuneError + } + + lx.prevWidths[0] = w + lx.pos += w + return r +} + +// ignore skips over the pending input before this point. +func (lx *lexer) ignore() { + lx.start = lx.pos +} + +// backup steps back one rune. Can be called 4 times between calls to next. +func (lx *lexer) backup() { + if lx.atEOF { + lx.atEOF = false + return + } + if lx.nprev < 1 { + panic("BUG in lexer: backed up too far") + } + w := lx.prevWidths[0] + lx.prevWidths[0] = lx.prevWidths[1] + lx.prevWidths[1] = lx.prevWidths[2] + lx.prevWidths[2] = lx.prevWidths[3] + lx.nprev-- + lx.pos -= w + if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { + lx.line-- + } +} + +// accept consumes the next rune if it's equal to `valid`. +func (lx *lexer) accept(valid rune) bool { + if lx.next() == valid { + return true + } + lx.backup() + return false +} + +// peek returns but does not consume the next rune in the input. +func (lx *lexer) peek() rune { + r := lx.next() + lx.backup() + return r +} + +// skip ignores all input that matches the given predicate. +func (lx *lexer) skip(pred func(rune) bool) { + for { + r := lx.next() + if pred(r) { + continue + } + lx.backup() + lx.ignore() + return + } +} + +// errorf stops all lexing by emitting an error and returning `nil`. +// Note that any value that is a character is escaped if it's a special +// character (newlines, tabs, etc.). +func (lx *lexer) errorf(format string, values ...interface{}) stateFn { + lx.items <- item{ + itemError, + fmt.Sprintf(format, values...), + lx.line, + } + return nil +} + +// lexTop consumes elements at the top level of TOML data. 
+func lexTop(lx *lexer) stateFn {
+	r := lx.next()
+	if isWhitespace(r) || isNL(r) {
+		return lexSkip(lx, lexTop)
+	}
+	switch r {
+	case commentStart:
+		lx.push(lexTop)
+		return lexCommentStart
+	case tableStart:
+		return lexTableStart
+	case eof:
+		if lx.pos > lx.start {
+			return lx.errorf("unexpected EOF")
+		}
+		lx.emit(itemEOF)
+		return nil
+	}
+
+	// At this point, the only valid item can be a key, so we back up
+	// and let the key lexer do the rest.
+	lx.backup()
+	lx.push(lexTopEnd)
+	return lexKeyStart
+}
+
+// lexTopEnd is entered whenever a top-level item has been consumed. (A value
+// or a table.) It must see only whitespace, and will turn back to lexTop
+// upon a newline. If it sees EOF, it will quit the lexer successfully.
+func lexTopEnd(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == commentStart:
+		// a comment will read to a newline for us.
+		lx.push(lexTop)
+		return lexCommentStart
+	case isWhitespace(r):
+		return lexTopEnd
+	case isNL(r):
+		lx.ignore()
+		return lexTop
+	case r == eof:
+		lx.emit(itemEOF)
+		return nil
+	}
+	return lx.errorf(
+		"expected a top-level item to end with a newline, comment, or EOF, but got %q instead",
+		r)
+}
+
+// lexTableStart lexes the beginning of a table. Namely, it makes sure that
+// it starts with a character other than '.' and ']'.
+// It assumes that '[' has already been consumed.
+// It also handles the case that this is an item in an array of tables.
+// e.g., '[[name]]'.
+func lexTableStart(lx *lexer) stateFn {
+	if lx.peek() == arrayTableStart {
+		lx.next()
+		lx.emit(itemArrayTableStart)
+		lx.push(lexArrayTableEnd)
+	} else {
+		lx.emit(itemTableStart)
+		lx.push(lexTableEnd)
+	}
+	return lexTableNameStart
+}
+
+func lexTableEnd(lx *lexer) stateFn {
+	lx.emit(itemTableEnd)
+	return lexTopEnd
+}
+
+func lexArrayTableEnd(lx *lexer) stateFn {
+	if r := lx.next(); r != arrayTableEnd {
+		return lx.errorf(
+			"expected end of table array name delimiter %q, but got %q instead",
+			arrayTableEnd, r)
+	}
+	lx.emit(itemArrayTableEnd)
+	return lexTopEnd
+}
+
+func lexTableNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == tableEnd || r == eof:
+		return lx.errorf("unexpected end of table name (table names cannot be empty)")
+	case r == tableSep:
+		return lx.errorf("unexpected table separator (table names cannot be empty)")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.push(lexTableNameEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexTableNameEnd)
+		return lexBareName
+	}
+}
+
+// lexTableNameEnd reads the end of a piece of a table name, optionally
+// consuming whitespace.
+func lexTableNameEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexTableNameEnd
+	case r == tableSep:
+		lx.ignore()
+		return lexTableNameStart
+	case r == tableEnd:
+		return lx.pop()
+	default:
+		return lx.errorf("expected '.' or ']' to end table name, but got %q instead", r)
+	}
+}
+
+// lexBareName lexes one part of a key or table.
+//
+// It assumes that at least one valid character for the table has already been
+// read.
+//
+// Lexes only one part, e.g. only 'a' inside 'a.b'.
+func lexBareName(lx *lexer) stateFn {
+	r := lx.next()
+	if isBareKeyChar(r) {
+		return lexBareName
+	}
+	lx.backup()
+	lx.emit(itemText)
+	return lx.pop()
+}
+
+// lexQuotedName lexes one quoted part of a key or table.
+//
+// It assumes that the opening quote has not yet been consumed.
+//
+// Lexes only one part, e.g. only '"a"' inside '"a".b'.
+func lexQuotedName(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case r == stringStart:
+		lx.ignore() // ignore the '"'
+		return lexString
+	case r == rawStringStart:
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected value")
+	default:
+		return lx.errorf("expected value but found %q instead", r)
+	}
+}
+
+// lexKeyStart consumes all key parts until a '='.
+func lexKeyStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '=': key name appears blank")
+	case r == '.':
+		return lx.errorf("unexpected '.': keys cannot start with a '.'")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		fallthrough
+	default: // Bare key
+		lx.emit(itemKeyStart)
+		return lexKeyNameStart
+	}
+}
+
+func lexKeyNameStart(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.peek(); {
+	case r == '=' || r == eof:
+		return lx.errorf("unexpected '='")
+	case r == '.':
+		return lx.errorf("unexpected '.'")
+	case r == stringStart || r == rawStringStart:
+		lx.ignore()
+		lx.push(lexKeyEnd)
+		return lexQuotedName
+	default:
+		lx.push(lexKeyEnd)
+		return lexBareName
+	}
+}
+
+// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
+// separator).
+func lexKeyEnd(lx *lexer) stateFn {
+	lx.skip(isWhitespace)
+	switch r := lx.next(); {
+	case isWhitespace(r):
+		return lexSkip(lx, lexKeyEnd)
+	case r == eof:
+		return lx.errorf("unexpected EOF; expected key separator %q", keySep)
+	case r == '.':
+		lx.ignore()
+		return lexKeyNameStart
+	case r == '=':
+		lx.emit(itemKeyEnd)
+		return lexSkip(lx, lexValue)
+	default:
+		return lx.errorf("expected '.' or '=', but got %q instead", r)
+	}
+}
+
+// lexValue starts the consumption of a value anywhere a value is expected.
+// lexValue will ignore whitespace.
+// After a value is lexed, the last state on the stack is popped and returned.
+func lexValue(lx *lexer) stateFn {
+	// We allow whitespace to precede a value, but NOT newlines.
+	// In array syntax, the array states are responsible for ignoring newlines.
+	r := lx.next()
+	switch {
+	case isWhitespace(r):
+		return lexSkip(lx, lexValue)
+	case isDigit(r):
+		lx.backup() // avoid an extra state and use the same as above
+		return lexNumberOrDateStart
+	}
+	switch r {
+	case arrayStart:
+		lx.ignore()
+		lx.emit(itemArray)
+		return lexArrayValue
+	case inlineTableStart:
+		lx.ignore()
+		lx.emit(itemInlineTableStart)
+		return lexInlineTableValue
+	case stringStart:
+		if lx.accept(stringStart) {
+			if lx.accept(stringStart) {
+				lx.ignore() // Ignore """
+				return lexMultilineString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the '"'
+		return lexString
+	case rawStringStart:
+		if lx.accept(rawStringStart) {
+			if lx.accept(rawStringStart) {
+				lx.ignore() // Ignore '''
+				return lexMultilineRawString
+			}
+			lx.backup()
+		}
+		lx.ignore() // ignore the "'"
+		return lexRawString
+	case '.': // special error case, be kind to users
+		return lx.errorf("floats must start with a digit, not '.'")
+	case 'i', 'n':
+		if (lx.accept('n') && lx.accept('f')) || (lx.accept('a') && lx.accept('n')) {
+			lx.emit(itemFloat)
+			return lx.pop()
+		}
+	case '-', '+':
+		return lexDecimalNumberStart
+	}
+	if unicode.IsLetter(r) {
+		// Be permissive here; lexBool will give a nice error if the
+		// user wrote something like
+		//   x = foo
+		// (i.e. not 'true' or 'false' but is something else word-like.)
+ lx.backup() + return lexBool + } + if r == eof { + return lx.errorf("unexpected EOF; expected value") + } + return lx.errorf("expected value but found %q instead", r) +} + +// lexArrayValue consumes one value in an array. It assumes that '[' or ',' +// have already been consumed. All whitespace and newlines are ignored. +func lexArrayValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValue) + case r == commentStart: + lx.push(lexArrayValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == arrayEnd: + // NOTE(caleb): The spec isn't clear about whether you can have + // a trailing comma or not, so we'll allow it. + return lexArrayEnd + } + + lx.backup() + lx.push(lexArrayValueEnd) + return lexValue +} + +// lexArrayValueEnd consumes everything between the end of an array value and +// the next value (or the end of the array): it ignores whitespace and newlines +// and expects either a ',' or a ']'. +func lexArrayValueEnd(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r) || isNL(r): + return lexSkip(lx, lexArrayValueEnd) + case r == commentStart: + lx.push(lexArrayValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + return lexArrayValue // move on to the next value + case r == arrayEnd: + return lexArrayEnd + } + return lx.errorf( + "expected a comma or array terminator %q, but got %s instead", + arrayEnd, runeOrEOF(r)) +} + +// lexArrayEnd finishes the lexing of an array. +// It assumes that a ']' has just been consumed. +func lexArrayEnd(lx *lexer) stateFn { + lx.ignore() + lx.emit(itemArrayEnd) + return lx.pop() +} + +// lexInlineTableValue consumes one key/value pair in an inline table. +// It assumes that '{' or ',' have already been consumed. Whitespace is ignored. +func lexInlineTableValue(lx *lexer) stateFn { + r := lx.next() + switch { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValue) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValue) + return lexCommentStart + case r == comma: + return lx.errorf("unexpected comma") + case r == inlineTableEnd: + return lexInlineTableEnd + } + lx.backup() + lx.push(lexInlineTableValueEnd) + return lexKeyStart +} + +// lexInlineTableValueEnd consumes everything between the end of an inline table +// key/value pair and the next pair (or the end of the table): +// it ignores whitespace and expects either a ',' or a '}'. +func lexInlineTableValueEnd(lx *lexer) stateFn { + switch r := lx.next(); { + case isWhitespace(r): + return lexSkip(lx, lexInlineTableValueEnd) + case isNL(r): + return lx.errorf("newlines not allowed within inline tables") + case r == commentStart: + lx.push(lexInlineTableValueEnd) + return lexCommentStart + case r == comma: + lx.ignore() + lx.skip(isWhitespace) + if lx.peek() == '}' { + return lx.errorf("trailing comma not allowed in inline tables") + } + return lexInlineTableValue + case r == inlineTableEnd: + return lexInlineTableEnd + default: + return lx.errorf( + "expected a comma or an inline table terminator %q, but got %s instead", + inlineTableEnd, runeOrEOF(r)) + } +} + +func runeOrEOF(r rune) string { + if r == eof { + return "end of file" + } + return "'" + string(r) + "'" +} + +// lexInlineTableEnd finishes the lexing of an inline table. +// It assumes that a '}' has just been consumed. 
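To make the asymmetry above concrete: arrays may span lines and tolerate a trailing comma, while inline tables reject both. A small sketch against the package's public `Decode` entry point (keys and values invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Nums  []int64
		Point map[string]interface{}
	}
	doc := "nums = [\n  1,\n  2,\n]\npoint = { x = 1, y = 2 }\n"
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.Nums, v.Point) // [1 2] map[x:1 y:2]

	// The same trailing comma inside an inline table is a lex error:
	_, err := toml.Decode("p = { x = 1, }", &v)
	fmt.Println(err) // ... trailing comma not allowed in inline tables ...
}
```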
+func lexInlineTableEnd(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemInlineTableEnd)
+	return lx.pop()
+}
+
+// lexString consumes the inner contents of a string. It assumes that the
+// beginning '"' has already been consumed and ignored.
+func lexString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == eof:
+		return lx.errorf(`unexpected EOF; expected '"'`)
+	case isControl(r) || r == '\r':
+		return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+	case isNL(r):
+		return lx.errorf("strings cannot contain newlines")
+	case r == '\\':
+		lx.push(lexString)
+		return lexStringEscape
+	case r == stringEnd:
+		lx.backup()
+		lx.emit(itemString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexString
+}
+
+// lexMultilineString consumes the inner contents of a string. It assumes that
+// the beginning '"""' has already been consumed and ignored.
+func lexMultilineString(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case eof:
+		return lx.errorf(`unexpected EOF; expected '"""'`)
+	case '\r':
+		if lx.peek() != '\n' {
+			return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+		}
+		return lexMultilineString
+	case '\\':
+		return lexMultilineStringEscape
+	case stringEnd:
+		/// Found " → try to read two more "".
+		if lx.accept(stringEnd) {
+			if lx.accept(stringEnd) {
+				/// Peek ahead: the string can contain " and "", including at the
+				/// end: """str"""""
+				/// 6 or more at the end, however, is an error.
+				if lx.peek() == stringEnd {
+					/// Check if we already lexed 5 "s; if so we have 6 now, and
+					/// that's just too many man!
+					if strings.HasSuffix(lx.current(), `"""""`) {
+						return lx.errorf(`unexpected '""""""'`)
+					}
+					lx.backup()
+					lx.backup()
+					return lexMultilineString
+				}
+
+				lx.backup() /// backup: don't include the """ in the item.
+				lx.backup()
+				lx.backup()
+				lx.emit(itemMultilineString)
+				lx.next() /// Read over """ again and discard it.
+				lx.next()
+				lx.next()
+				lx.ignore()
+				return lx.pop()
+			}
+			lx.backup()
+		}
+	}
+
+	if isControl(r) {
+		return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+	}
+	return lexMultilineString
+}
+
+// lexRawString consumes a raw string. Nothing can be escaped in such a string.
+// It assumes that the beginning "'" has already been consumed and ignored.
+func lexRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch {
+	case r == eof:
+		return lx.errorf(`unexpected EOF; expected "'"`)
+	case isControl(r) || r == '\r':
+		return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+	case isNL(r):
+		return lx.errorf("strings cannot contain newlines")
+	case r == rawStringEnd:
+		lx.backup()
+		lx.emit(itemRawString)
+		lx.next()
+		lx.ignore()
+		return lx.pop()
+	}
+	return lexRawString
+}
+
+// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
+// a string. It assumes that the beginning "'''" has already been consumed and
+// ignored.
+func lexMultilineRawString(lx *lexer) stateFn {
+	r := lx.next()
+	switch r {
+	case eof:
+		return lx.errorf(`unexpected EOF; expected "'''"`)
+	case '\r':
+		if lx.peek() != '\n' {
+			return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r)
+		}
+		return lexMultilineRawString
+	case rawStringEnd:
+		/// Found ' → try to read two more ''.
+		if lx.accept(rawStringEnd) {
+			if lx.accept(rawStringEnd) {
+				/// Peek ahead: the string can contain ' and '', including at the
+				/// end: '''str'''''
+				/// 6 or more at the end, however, is an error.
+ if lx.peek() == rawStringEnd { + /// Check if we already lexed 5 's; if so we have 6 now, and + /// that's just too many man! + if strings.HasSuffix(lx.current(), "'''''") { + return lx.errorf(`unexpected "''''''"`) + } + lx.backup() + lx.backup() + return lexMultilineRawString + } + + lx.backup() /// backup: don't include the ''' in the item. + lx.backup() + lx.backup() + lx.emit(itemRawMultilineString) + lx.next() /// Read over ''' again and discard it. + lx.next() + lx.next() + lx.ignore() + return lx.pop() + } + lx.backup() + } + } + + if isControl(r) { + return lx.errorf("control characters are not allowed inside strings: '0x%02x'", r) + } + return lexMultilineRawString +} + +// lexMultilineStringEscape consumes an escaped character. It assumes that the +// preceding '\\' has already been consumed. +func lexMultilineStringEscape(lx *lexer) stateFn { + // Handle the special case first: + if isNL(lx.next()) { + return lexMultilineString + } + lx.backup() + lx.push(lexMultilineString) + return lexStringEscape(lx) +} + +func lexStringEscape(lx *lexer) stateFn { + r := lx.next() + switch r { + case 'b': + fallthrough + case 't': + fallthrough + case 'n': + fallthrough + case 'f': + fallthrough + case 'r': + fallthrough + case '"': + fallthrough + case ' ', '\t': + // Inside """ .. """ strings you can use \ to escape newlines, and any + // amount of whitespace can be between the \ and \n. + fallthrough + case '\\': + return lx.pop() + case 'u': + return lexShortUnicodeEscape + case 'U': + return lexLongUnicodeEscape + } + return lx.errorf("invalid escape character %q; only the following escape characters are allowed: "+ + `\b, \t, \n, \f, \r, \", \\, \uXXXX, and \UXXXXXXXX`, r) +} + +func lexShortUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 4; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected four hexadecimal digits after '\u', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +func lexLongUnicodeEscape(lx *lexer) stateFn { + var r rune + for i := 0; i < 8; i++ { + r = lx.next() + if !isHexadecimal(r) { + return lx.errorf( + `expected eight hexadecimal digits after '\U', but got %q instead`, + lx.current()) + } + } + return lx.pop() +} + +// lexNumberOrDateStart processes the first character of a value which begins +// with a digit. It exists to catch values starting with '0', so that +// lexBaseNumberOrDate can differentiate base prefixed integers from other +// types. +func lexNumberOrDateStart(lx *lexer) stateFn { + r := lx.next() + switch r { + case '0': + return lexBaseNumberOrDate + } + + if !isDigit(r) { + // The only way to reach this state is if the value starts + // with a digit, so specifically treat anything else as an + // error. + return lx.errorf("expected a digit but got %q", r) + } + + return lexNumberOrDate +} + +// lexNumberOrDate consumes either an integer, float or datetime. +func lexNumberOrDate(lx *lexer) stateFn { + r := lx.next() + if isDigit(r) { + return lexNumberOrDate + } + switch r { + case '-', ':': + return lexDatetime + case '_': + return lexDecimalNumber + case '.', 'e', 'E': + return lexFloat + } + + lx.backup() + lx.emit(itemInteger) + return lx.pop() +} + +// lexDatetime consumes a Datetime, to a first approximation. +// The parser validates that it matches one of the accepted formats. 
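The quote-counting dance in the two multiline lexers above has a user-visible consequence: a multiline string may end in up to five consecutive closing quotes (three close the string, up to two belong to the value), while six are rejected. A hedged sketch of that edge case through the public API:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S string }
	// Five quotes at the end: """ closes the string, the remaining two
	// are part of the value. A sixth quote would be a lex error.
	if _, err := toml.Decode(`s = """str"""""`, &v); err != nil {
		panic(err)
	}
	fmt.Printf("%q\n", v.S) // "str\"\""
}
```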
+func lexDatetime(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDatetime
+	}
+	switch r {
+	case '-', ':', 'T', 't', ' ', '.', 'Z', 'z', '+':
+		return lexDatetime
+	}
+
+	lx.backup()
+	lx.emitTrim(itemDatetime)
+	return lx.pop()
+}
+
+// lexHexInteger consumes a hexadecimal integer after seeing the '0x' prefix.
+func lexHexInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isHexadecimal(r) {
+		return lexHexInteger
+	}
+	switch r {
+	case '_':
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexOctalInteger consumes an octal integer after seeing the '0o' prefix.
+func lexOctalInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isOctal(r) {
+		return lexOctalInteger
+	}
+	switch r {
+	case '_':
+		return lexOctalInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexBinaryInteger consumes a binary integer after seeing the '0b' prefix.
+func lexBinaryInteger(lx *lexer) stateFn {
+	r := lx.next()
+	if isBinary(r) {
+		return lexBinaryInteger
+	}
+	switch r {
+	case '_':
+		return lexBinaryInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumber consumes a decimal float or integer.
+func lexDecimalNumber(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+	switch r {
+	case '.', 'e', 'E':
+		return lexFloat
+	case '_':
+		return lexDecimalNumber
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexDecimalNumberStart consumes the first digit of a number beginning with a
+// sign. It assumes the sign has already been consumed. Values which start
+// with a sign are only allowed to be decimal integers or floats.
+//
+// The special "nan" and "inf" values are also recognized.
+func lexDecimalNumberStart(lx *lexer) stateFn {
+	r := lx.next()
+
+	// Special error cases to give users better error messages
+	switch r {
+	case 'i':
+		if !lx.accept('n') || !lx.accept('f') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case 'n':
+		if !lx.accept('a') || !lx.accept('n') {
+			return lx.errorf("invalid float: '%s'", lx.current())
+		}
+		lx.emit(itemFloat)
+		return lx.pop()
+	case '0':
+		p := lx.peek()
+		switch p {
+		case 'b', 'o', 'x':
+			return lx.errorf("cannot use sign with non-decimal numbers: '%s%c'", lx.current(), p)
+		}
+	case '.':
+		return lx.errorf("floats must start with a digit, not '.'")
+	}
+
+	if isDigit(r) {
+		return lexDecimalNumber
+	}
+
+	return lx.errorf("expected a digit but got %q", r)
+}
+
+// lexBaseNumberOrDate differentiates between the possible values which
+// start with '0'. It assumes that before reaching this state, the initial '0'
+// has been consumed.
+func lexBaseNumberOrDate(lx *lexer) stateFn {
+	r := lx.next()
+	// Note: All datetimes start with at least two digits, so we don't
+	// handle date characters (':', '-', etc.) here.
+	if isDigit(r) {
+		return lexNumberOrDate
+	}
+	switch r {
+	case '_':
+		// Can only be decimal, because there can't be an underscore
+		// between the '0' and the base designator, and dates can't
+		// contain underscores.
+		return lexDecimalNumber
+	case '.', 'e', 'E':
+		return lexFloat
+	case 'b':
+		r = lx.peek()
+		if !isBinary(r) {
+			lx.errorf("not a binary number: '%s%c'", lx.current(), r)
+		}
+		return lexBinaryInteger
+	case 'o':
+		r = lx.peek()
+		if !isOctal(r) {
+			lx.errorf("not an octal number: '%s%c'", lx.current(), r)
+		}
+		return lexOctalInteger
+	case 'x':
+		r = lx.peek()
+		if !isHexadecimal(r) {
+			lx.errorf("not a hexadecimal number: '%s%c'", lx.current(), r)
+		}
+		return lexHexInteger
+	}
+
+	lx.backup()
+	lx.emit(itemInteger)
+	return lx.pop()
+}
+
+// lexFloat consumes the elements of a float. It allows any sequence of
+// float-like characters, so floats emitted by the lexer are only a first
+// approximation and must be validated by the parser.
+func lexFloat(lx *lexer) stateFn {
+	r := lx.next()
+	if isDigit(r) {
+		return lexFloat
+	}
+	switch r {
+	case '_', '.', '-', '+', 'e', 'E':
+		return lexFloat
+	}
+
+	lx.backup()
+	lx.emit(itemFloat)
+	return lx.pop()
+}
+
+// lexBool consumes a bool string: 'true' or 'false'.
+func lexBool(lx *lexer) stateFn {
+	var rs []rune
+	for {
+		r := lx.next()
+		if !unicode.IsLetter(r) {
+			lx.backup()
+			break
+		}
+		rs = append(rs, r)
+	}
+	s := string(rs)
+	switch s {
+	case "true", "false":
+		lx.emit(itemBool)
+		return lx.pop()
+	}
+	return lx.errorf("expected value but found %q instead", s)
+}
+
+// lexCommentStart begins the lexing of a comment. It will emit
+// itemCommentStart and consume no characters, passing control to lexComment.
+func lexCommentStart(lx *lexer) stateFn {
+	lx.ignore()
+	lx.emit(itemCommentStart)
+	return lexComment
+}
+
+// lexComment lexes an entire comment. It assumes that '#' has been consumed.
+// It will consume *up to* the first newline character, and pass control
+// back to the last state on the stack.
+func lexComment(lx *lexer) stateFn {
+	switch r := lx.next(); {
+	case isNL(r) || r == eof:
+		lx.backup()
+		lx.emit(itemText)
+		return lx.pop()
+	case isControl(r):
+		return lx.errorf("control characters are not allowed inside comments: '0x%02x'", r)
+	default:
+		return lexComment
+	}
+}
+
+// lexSkip ignores all slurped input and moves on to the next state.
+func lexSkip(lx *lexer, nextState stateFn) stateFn {
+	lx.ignore()
+	return nextState
+}
+
+// isWhitespace returns true if `r` is a whitespace character according
+// to the spec.
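Since the lexer above hands the raw text (underscores included) to the parser, which later relies on strconv.ParseInt with base 0, the prefixed and underscore forms all round-trip. A sketch of the accepted spellings and of the sign restriction enforced by lexDecimalNumberStart:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ A, B, C, D int64 }
	doc := `
a = 1_000
b = 0xdead_beef
c = 0o755
d = 0b1101
`
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	fmt.Println(v.A, v.B, v.C, v.D) // 1000 3735928559 493 13

	// A sign is only valid on decimal numbers:
	_, err := toml.Decode(`x = -0x10`, &v)
	fmt.Println(err) // ... cannot use sign with non-decimal numbers ...
}
```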
+func isWhitespace(r rune) bool {
+	return r == '\t' || r == ' '
+}
+
+func isNL(r rune) bool {
+	return r == '\n' || r == '\r'
+}
+
+// Control characters except \t, \r, \n
+func isControl(r rune) bool {
+	switch r {
+	case '\t', '\r', '\n':
+		return false
+	default:
+		return (r >= 0x00 && r <= 0x1f) || r == 0x7f
+	}
+}
+
+func isDigit(r rune) bool {
+	return r >= '0' && r <= '9'
+}
+
+func isHexadecimal(r rune) bool {
+	return (r >= '0' && r <= '9') ||
+		(r >= 'a' && r <= 'f') ||
+		(r >= 'A' && r <= 'F')
+}
+
+func isOctal(r rune) bool {
+	return r >= '0' && r <= '7'
+}
+
+func isBinary(r rune) bool {
+	return r == '0' || r == '1'
+}
+
+func isBareKeyChar(r rune) bool {
+	return (r >= 'A' && r <= 'Z') ||
+		(r >= 'a' && r <= 'z') ||
+		(r >= '0' && r <= '9') ||
+		r == '_' ||
+		r == '-'
+}
+
+func (s stateFn) String() string {
+	name := runtime.FuncForPC(reflect.ValueOf(s).Pointer()).Name()
+	if i := strings.LastIndexByte(name, '.'); i > -1 {
+		name = name[i+1:]
+	}
+	if s == nil {
+		name = "<nil>"
+	}
+	return name + "()"
+}
+
+func (itype itemType) String() string {
+	switch itype {
+	case itemError:
+		return "Error"
+	case itemNIL:
+		return "NIL"
+	case itemEOF:
+		return "EOF"
+	case itemText:
+		return "Text"
+	case itemString, itemRawString, itemMultilineString, itemRawMultilineString:
+		return "String"
+	case itemBool:
+		return "Bool"
+	case itemInteger:
+		return "Integer"
+	case itemFloat:
+		return "Float"
+	case itemDatetime:
+		return "DateTime"
+	case itemTableStart:
+		return "TableStart"
+	case itemTableEnd:
+		return "TableEnd"
+	case itemKeyStart:
+		return "KeyStart"
+	case itemKeyEnd:
+		return "KeyEnd"
+	case itemArray:
+		return "Array"
+	case itemArrayEnd:
+		return "ArrayEnd"
+	case itemCommentStart:
+		return "CommentStart"
+	case itemInlineTableStart:
+		return "InlineTableStart"
+	case itemInlineTableEnd:
+		return "InlineTableEnd"
+	}
+	panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
+}
+
+func (item item) String() string {
+	return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
+}
diff --git a/vendor/github.com/BurntSushi/toml/parse.go b/vendor/github.com/BurntSushi/toml/parse.go
new file mode 100644
index 0000000000..d9ae5db946
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/parse.go
@@ -0,0 +1,739 @@
+package toml
+
+import (
+	"errors"
+	"fmt"
+	"strconv"
+	"strings"
+	"time"
+	"unicode/utf8"
+
+	"github.com/BurntSushi/toml/internal"
+)
+
+type parser struct {
+	mapping map[string]interface{}
+	types   map[string]tomlType
+	lx      *lexer
+
+	ordered    []Key           // List of keys in the order that they appear in the TOML data.
+	context    Key             // Full key for the current hash in scope.
+	currentKey string          // Base key name for everything except hashes.
+	approxLine int             // Rough approximation of line number
+	implicits  map[string]bool // Record implied keys (e.g. 'key.group.names').
+}
+
+// ParseError is used when a file can't be parsed: for example invalid integer
+// literals, duplicate keys, etc.
+type ParseError struct {
+	Message string
+	Line    int
+	LastKey string
+}
+
+func (pe ParseError) Error() string {
+	return fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
+		pe.Line, pe.LastKey, pe.Message)
+}
+
+func parse(data string) (p *parser, err error) {
+	defer func() {
+		if r := recover(); r != nil {
+			var ok bool
+			if err, ok = r.(ParseError); ok {
+				return
+			}
+			panic(r)
+		}
+	}()
+
+	// Read over BOM; do this here as the lexer calls utf8.DecodeRuneInString()
+	// which mangles stuff.
+ if strings.HasPrefix(data, "\xff\xfe") || strings.HasPrefix(data, "\xfe\xff") { + data = data[2:] + } + + // Examine first few bytes for NULL bytes; this probably means it's a UTF-16 + // file (second byte in surrogate pair being NULL). Again, do this here to + // avoid having to deal with UTF-8/16 stuff in the lexer. + ex := 6 + if len(data) < 6 { + ex = len(data) + } + if strings.ContainsRune(data[:ex], 0) { + return nil, errors.New("files cannot contain NULL bytes; probably using UTF-16; TOML files must be UTF-8") + } + + p = &parser{ + mapping: make(map[string]interface{}), + types: make(map[string]tomlType), + lx: lex(data), + ordered: make([]Key, 0), + implicits: make(map[string]bool), + } + for { + item := p.next() + if item.typ == itemEOF { + break + } + p.topLevel(item) + } + + return p, nil +} + +func (p *parser) panicf(format string, v ...interface{}) { + msg := fmt.Sprintf(format, v...) + panic(ParseError{ + Message: msg, + Line: p.approxLine, + LastKey: p.current(), + }) +} + +func (p *parser) next() item { + it := p.lx.nextItem() + //fmt.Printf("ITEM %-18s line %-3d │ %q\n", it.typ, it.line, it.val) + if it.typ == itemError { + p.panicf("%s", it.val) + } + return it +} + +func (p *parser) bug(format string, v ...interface{}) { + panic(fmt.Sprintf("BUG: "+format+"\n\n", v...)) +} + +func (p *parser) expect(typ itemType) item { + it := p.next() + p.assertEqual(typ, it.typ) + return it +} + +func (p *parser) assertEqual(expected, got itemType) { + if expected != got { + p.bug("Expected '%s' but got '%s'.", expected, got) + } +} + +func (p *parser) topLevel(item item) { + switch item.typ { + case itemCommentStart: // # .. + p.approxLine = item.line + p.expect(itemText) + case itemTableStart: // [ .. ] + name := p.next() + p.approxLine = name.line + + var key Key + for ; name.typ != itemTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemTableEnd, name.typ) + + p.addContext(key, false) + p.setType("", tomlHash) + p.ordered = append(p.ordered, key) + case itemArrayTableStart: // [[ .. ]] + name := p.next() + p.approxLine = name.line + + var key Key + for ; name.typ != itemArrayTableEnd && name.typ != itemEOF; name = p.next() { + key = append(key, p.keyString(name)) + } + p.assertEqual(itemArrayTableEnd, name.typ) + + p.addContext(key, true) + p.setType("", tomlArrayHash) + p.ordered = append(p.ordered, key) + case itemKeyStart: // key = .. + outerContext := p.context + /// Read all the key parts (e.g. 'a' and 'b' in 'a.b') + k := p.next() + p.approxLine = k.line + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + + /// Remove the context we added (preserving any context from [tbl] lines). + p.context = outerContext + p.currentKey = "" + default: + p.bug("Unexpected type at top level: %s", item.typ) + } +} + +// Gets a string for a key (or part of a key in a table name). 
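The recover in parse is the only place the panicking helpers (panicf, bug) are caught, so every lexer or parser failure surfaces to callers as a ParseError value. A sketch of what that looks like from the public API (the exact error text is approximate):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v map[string]interface{}
	// Redefining a concrete key triggers the panicf path in the parser.
	_, err := toml.Decode("a = 1\na = 2\n", &v)
	fmt.Println(err)
	// Prints something like:
	// Near line 2 (last key parsed 'a'): Key 'a' has already been defined.
}
```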
+func (p *parser) keyString(it item) string { + switch it.typ { + case itemText: + return it.val + case itemString, itemMultilineString, + itemRawString, itemRawMultilineString: + s, _ := p.value(it, false) + return s.(string) + default: + p.bug("Unexpected key type: %s", it.typ) + } + panic("unreachable") +} + +var datetimeRepl = strings.NewReplacer( + "z", "Z", + "t", "T", + " ", "T") + +// value translates an expected value from the lexer into a Go value wrapped +// as an empty interface. +func (p *parser) value(it item, parentIsArray bool) (interface{}, tomlType) { + switch it.typ { + case itemString: + return p.replaceEscapes(it.val), p.typeOfPrimitive(it) + case itemMultilineString: + return p.replaceEscapes(stripFirstNewline(stripEscapedNewlines(it.val))), p.typeOfPrimitive(it) + case itemRawString: + return it.val, p.typeOfPrimitive(it) + case itemRawMultilineString: + return stripFirstNewline(it.val), p.typeOfPrimitive(it) + case itemInteger: + return p.valueInteger(it) + case itemFloat: + return p.valueFloat(it) + case itemBool: + switch it.val { + case "true": + return true, p.typeOfPrimitive(it) + case "false": + return false, p.typeOfPrimitive(it) + default: + p.bug("Expected boolean value, but got '%s'.", it.val) + } + case itemDatetime: + return p.valueDatetime(it) + case itemArray: + return p.valueArray(it) + case itemInlineTableStart: + return p.valueInlineTable(it, parentIsArray) + default: + p.bug("Unexpected value type: %s", it.typ) + } + panic("unreachable") +} + +func (p *parser) valueInteger(it item) (interface{}, tomlType) { + if !numUnderscoresOK(it.val) { + p.panicf("Invalid integer %q: underscores must be surrounded by digits", it.val) + } + if numHasLeadingZero(it.val) { + p.panicf("Invalid integer %q: cannot have leading zeroes", it.val) + } + + num, err := strconv.ParseInt(it.val, 0, 64) + if err != nil { + // Distinguish integer values. Normally, it'd be a bug if the lexer + // provides an invalid integer, but it's possible that the number is + // out of range of valid values (which the lexer cannot determine). + // So mark the former as a bug but the latter as a legitimate user + // error. + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicf("Integer '%s' is out of the range of 64-bit signed integers.", it.val) + } else { + p.bug("Expected integer value, but got '%s'.", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +func (p *parser) valueFloat(it item) (interface{}, tomlType) { + parts := strings.FieldsFunc(it.val, func(r rune) bool { + switch r { + case '.', 'e', 'E': + return true + } + return false + }) + for _, part := range parts { + if !numUnderscoresOK(part) { + p.panicf("Invalid float %q: underscores must be surrounded by digits", it.val) + } + } + if len(parts) > 0 && numHasLeadingZero(parts[0]) { + p.panicf("Invalid float %q: cannot have leading zeroes", it.val) + } + if !numPeriodsOK(it.val) { + // As a special case, numbers like '123.' or '1.e2', + // which are valid as far as Go/strconv are concerned, + // must be rejected because TOML says that a fractional + // part consists of '.' followed by 1+ digits. + p.panicf("Invalid float %q: '.' must be followed by one or more digits", it.val) + } + val := strings.Replace(it.val, "_", "", -1) + if val == "+nan" || val == "-nan" { // Go doesn't support this, but TOML spec does. 
+ val = "nan" + } + num, err := strconv.ParseFloat(val, 64) + if err != nil { + if e, ok := err.(*strconv.NumError); ok && e.Err == strconv.ErrRange { + p.panicf("Float '%s' is out of the range of 64-bit IEEE-754 floating-point numbers.", it.val) + } else { + p.panicf("Invalid float value: %q", it.val) + } + } + return num, p.typeOfPrimitive(it) +} + +var dtTypes = []struct { + fmt string + zone *time.Location +}{ + {time.RFC3339Nano, time.Local}, + {"2006-01-02T15:04:05.999999999", internal.LocalDatetime}, + {"2006-01-02", internal.LocalDate}, + {"15:04:05.999999999", internal.LocalTime}, +} + +func (p *parser) valueDatetime(it item) (interface{}, tomlType) { + it.val = datetimeRepl.Replace(it.val) + var ( + t time.Time + ok bool + err error + ) + for _, dt := range dtTypes { + t, err = time.ParseInLocation(dt.fmt, it.val, dt.zone) + if err == nil { + ok = true + break + } + } + if !ok { + p.panicf("Invalid TOML Datetime: %q.", it.val) + } + return t, p.typeOfPrimitive(it) +} + +func (p *parser) valueArray(it item) (interface{}, tomlType) { + p.setType(p.currentKey, tomlArray) + + // p.setType(p.currentKey, typ) + var ( + array []interface{} + types []tomlType + ) + for it = p.next(); it.typ != itemArrayEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + val, typ := p.value(it, true) + array = append(array, val) + types = append(types, typ) + } + return array, tomlArray +} + +func (p *parser) valueInlineTable(it item, parentIsArray bool) (interface{}, tomlType) { + var ( + hash = make(map[string]interface{}) + outerContext = p.context + outerKey = p.currentKey + ) + + p.context = append(p.context, p.currentKey) + prevContext := p.context + p.currentKey = "" + + p.addImplicit(p.context) + p.addContext(p.context, parentIsArray) + + /// Loop over all table key/value pairs. + for it := p.next(); it.typ != itemInlineTableEnd; it = p.next() { + if it.typ == itemCommentStart { + p.expect(itemText) + continue + } + + /// Read all key parts. + k := p.next() + p.approxLine = k.line + var key Key + for ; k.typ != itemKeyEnd && k.typ != itemEOF; k = p.next() { + key = append(key, p.keyString(k)) + } + p.assertEqual(itemKeyEnd, k.typ) + + /// The current key is the last part. + p.currentKey = key[len(key)-1] + + /// All the other parts (if any) are the context; need to set each part + /// as implicit. + context := key[:len(key)-1] + for i := range context { + p.addImplicitContext(append(p.context, context[i:i+1]...)) + } + + /// Set the value. + val, typ := p.value(p.next(), false) + p.set(p.currentKey, val, typ) + p.ordered = append(p.ordered, p.context.add(p.currentKey)) + hash[p.currentKey] = val + + /// Restore context. + p.context = prevContext + } + p.context = outerContext + p.currentKey = outerKey + return hash, tomlHash +} + +// numHasLeadingZero checks if this number has leading zeroes, allowing for '0', +// +/- signs, and base prefixes. +func numHasLeadingZero(s string) bool { + if len(s) > 1 && s[0] == '0' && isDigit(rune(s[1])) { // >1 to allow "0" and isDigit to allow 0x + return true + } + if len(s) > 2 && (s[0] == '-' || s[0] == '+') && s[1] == '0' { + return true + } + return false +} + +// numUnderscoresOK checks whether each underscore in s is surrounded by +// characters that are not underscores. 
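One consequence of the dtTypes table above: the three offset-less forms are parsed in the sentinel fixed zones named "datetime-local", "date-local", and "time-local" (the internal.Local* zones defined near the top of this diff), rather than being silently treated as UTC. A sketch, assuming the target struct uses time.Time fields:

```go
package main

import (
	"fmt"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Full  time.Time
		Local time.Time
		Day   time.Time
	}
	doc := `
full  = 2006-05-04T15:04:05Z
local = 2006-05-04T15:04:05
day   = 2006-05-04
`
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	// Offset-less values carry the sentinel zone names:
	fmt.Println(v.Full.Location(), v.Local.Location(), v.Day.Location())
	// UTC datetime-local date-local
}
```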
+func numUnderscoresOK(s string) bool {
+	switch s {
+	case "nan", "+nan", "-nan", "inf", "-inf", "+inf":
+		return true
+	}
+	accept := false
+	for _, r := range s {
+		if r == '_' {
+			if !accept {
+				return false
+			}
+		}
+
+		// isHexadecimal is a superset of all the permissible characters
+		// surrounding an underscore.
+		accept = isHexadecimal(r)
+	}
+	return accept
+}
+
+// numPeriodsOK checks whether every period in s is followed by a digit.
+func numPeriodsOK(s string) bool {
+	period := false
+	for _, r := range s {
+		if period && !isDigit(r) {
+			return false
+		}
+		period = r == '.'
+	}
+	return !period
+}
+
+// Set the current context of the parser, where the context is either a hash or
+// an array of hashes, depending on the value of the `array` parameter.
+//
+// Establishing the context also makes sure that the key isn't a duplicate, and
+// will create implicit hashes automatically.
+func (p *parser) addContext(key Key, array bool) {
+	var ok bool
+
+	// Always start at the top level and drill down for our context.
+	hashContext := p.mapping
+	keyContext := make(Key, 0)
+
+	// We only need implicit hashes for key[0:-1]
+	for _, k := range key[0 : len(key)-1] {
+		_, ok = hashContext[k]
+		keyContext = append(keyContext, k)
+
+		// No key? Make an implicit hash and move on.
+		if !ok {
+			p.addImplicit(keyContext)
+			hashContext[k] = make(map[string]interface{})
+		}
+
+		// If the hash context is actually an array of tables, then set
+		// the hash context to the last element in that array.
+		//
+		// Otherwise, it better be a table, since this MUST be a key group (by
+		// virtue of it not being the last element in a key).
+		switch t := hashContext[k].(type) {
+		case []map[string]interface{}:
+			hashContext = t[len(t)-1]
+		case map[string]interface{}:
+			hashContext = t
+		default:
+			p.panicf("Key '%s' was already created as a hash.", keyContext)
+		}
+	}
+
+	p.context = keyContext
+	if array {
+		// If this is the first element for this array, then allocate a new
+		// list of tables for it.
+		k := key[len(key)-1]
+		if _, ok := hashContext[k]; !ok {
+			hashContext[k] = make([]map[string]interface{}, 0, 4)
+		}
+
+		// Add a new table. But make sure the key hasn't already been used
+		// for something else.
+		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
+			hashContext[k] = append(hash, make(map[string]interface{}))
+		} else {
+			p.panicf("Key '%s' was already created and cannot be used as an array.", keyContext)
+		}
+	} else {
+		p.setValue(key[len(key)-1], make(map[string]interface{}))
+	}
+	p.context = append(p.context, key[len(key)-1])
+}
+
+// set calls setValue and setType.
+func (p *parser) set(key string, val interface{}, typ tomlType) {
+	p.setValue(p.currentKey, val)
+	p.setType(p.currentKey, typ)
+}
+
+// setValue sets the given key to the given value in the current context.
+// It will make sure that the key hasn't already been defined, accounting for
+// implicit key groups.
+func (p *parser) setValue(key string, value interface{}) {
+	var (
+		tmpHash    interface{}
+		ok         bool
+		hash       = p.mapping
+		keyContext Key
+	)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+		if tmpHash, ok = hash[k]; !ok {
+			p.bug("Context for key '%s' has not been established.", keyContext)
+		}
+		switch t := tmpHash.(type) {
+		case []map[string]interface{}:
+			// The context is a table of hashes. Pick the most recent table
+			// defined as the current hash.
+			hash = t[len(t)-1]
+		case map[string]interface{}:
+			hash = t
+		default:
+			p.panicf("Key '%s' has already been defined.", keyContext)
+		}
+	}
+	keyContext = append(keyContext, key)
+
+	if _, ok := hash[key]; ok {
+		// Normally redefining keys isn't allowed, but the key could have been
+		// defined implicitly and it's allowed to be redefined concretely. (See
+		// the `valid/implicit-and-explicit-after.toml` in toml-test)
+		//
+		// But we have to make sure to stop marking it as implicit. (So that
+		// another redefinition provokes an error.)
+		//
+		// Note that since it has already been defined (as a hash), we don't
+		// want to overwrite it. So our business is done.
+		if p.isArray(keyContext) {
+			p.removeImplicit(keyContext)
+			hash[key] = value
+			return
+		}
+		if p.isImplicit(keyContext) {
+			p.removeImplicit(keyContext)
+			return
+		}
+
+		// Otherwise, we have a concrete key trying to override a previous
+		// key, which is *always* wrong.
+		p.panicf("Key '%s' has already been defined.", keyContext)
+	}
+
+	hash[key] = value
+}
+
+// setType sets the type of a particular value at a given key.
+// It should be called immediately AFTER setValue.
+//
+// Note that if `key` is empty, then the type given will be applied to the
+// current context (which is either a table or an array of tables).
+func (p *parser) setType(key string, typ tomlType) {
+	keyContext := make(Key, 0, len(p.context)+1)
+	for _, k := range p.context {
+		keyContext = append(keyContext, k)
+	}
+	if len(key) > 0 { // allow type setting for hashes
+		keyContext = append(keyContext, key)
+	}
+	p.types[keyContext.String()] = typ
+}
+
+// Implicit keys need to be created when tables are implied in "a.b.c.d = 1" and
+// "[a.b.c]" (the "a", "b", and "c" hashes are never created explicitly).
+func (p *parser) addImplicit(key Key)     { p.implicits[key.String()] = true }
+func (p *parser) removeImplicit(key Key)  { p.implicits[key.String()] = false }
+func (p *parser) isImplicit(key Key) bool { return p.implicits[key.String()] }
+func (p *parser) isArray(key Key) bool    { return p.types[key.String()] == tomlArray }
+func (p *parser) addImplicitContext(key Key) {
+	p.addImplicit(key)
+	p.addContext(key, false)
+}
+
+// current returns the full key name of the current context.
+func (p *parser) current() string {
+	if len(p.currentKey) == 0 {
+		return p.context.String()
+	}
+	if len(p.context) == 0 {
+		return p.currentKey
+	}
+	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
+}
+
+func stripFirstNewline(s string) string {
+	if len(s) > 0 && s[0] == '\n' {
+		return s[1:]
+	}
+	if len(s) > 1 && s[0] == '\r' && s[1] == '\n' {
+		return s[2:]
+	}
+	return s
+}
+
+// Remove newlines inside triple-quoted strings if a line ends with "\".
+func stripEscapedNewlines(s string) string {
+	split := strings.Split(s, "\n")
+	if len(split) < 1 {
+		return s
+	}
+
+	escNL := false // Keep track of whether the last non-blank line was escaped.
+	for i, line := range split {
+		line = strings.TrimRight(line, " \t\r")
+
+		if len(line) == 0 || line[len(line)-1] != '\\' {
+			split[i] = strings.TrimRight(split[i], "\r")
+			if !escNL && i != len(split)-1 {
+				split[i] += "\n"
+			}
+			continue
+		}
+
+		escBS := true
+		for j := len(line) - 1; j >= 0 && line[j] == '\\'; j-- {
+			escBS = !escBS
+		}
+		if escNL {
+			line = strings.TrimLeft(line, " \t\r")
+		}
+		escNL = !escBS
+
+		if escBS {
+			split[i] += "\n"
+			continue
+		}
+
+		split[i] = line[:len(line)-1] // Remove \
+		if len(split)-1 > i {
+			split[i+1] = strings.TrimLeft(split[i+1], " \t\r")
+		}
+	}
+	return strings.Join(split, "")
+}
+
+func (p *parser) replaceEscapes(str string) string {
+	var replaced []rune
+	s := []byte(str)
+	r := 0
+	for r < len(s) {
+		if s[r] != '\\' {
+			c, size := utf8.DecodeRune(s[r:])
+			r += size
+			replaced = append(replaced, c)
+			continue
+		}
+		r += 1
+		if r >= len(s) {
+			p.bug("Escape sequence at end of string.")
+			return ""
+		}
+		switch s[r] {
+		default:
+			p.bug("Expected valid escape code after \\, but got %q.", s[r])
+			return ""
+		case ' ', '\t':
+			p.panicf("invalid escape: '\\%c'", s[r])
+			return ""
+		case 'b':
+			replaced = append(replaced, rune(0x0008))
+			r += 1
+		case 't':
+			replaced = append(replaced, rune(0x0009))
+			r += 1
+		case 'n':
+			replaced = append(replaced, rune(0x000A))
+			r += 1
+		case 'f':
+			replaced = append(replaced, rune(0x000C))
+			r += 1
+		case 'r':
+			replaced = append(replaced, rune(0x000D))
+			r += 1
+		case '"':
+			replaced = append(replaced, rune(0x0022))
+			r += 1
+		case '\\':
+			replaced = append(replaced, rune(0x005C))
+			r += 1
+		case 'u':
+			// At this point, we know we have a Unicode escape of the form
+			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
+			replaced = append(replaced, escaped)
+			r += 5
+		case 'U':
+			// At this point, we know we have a Unicode escape of the form
+			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
+			// for us.)
+			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
+			replaced = append(replaced, escaped)
+			r += 9
+		}
+	}
+	return string(replaced)
+}
+
+func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
+	s := string(bs)
+	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
+	if err != nil {
+		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
+			"lexer claims it's OK: %s", s, err)
+	}
+	if !utf8.ValidRune(rune(hex)) {
+		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
+	}
+	return rune(hex)
+}
diff --git a/vendor/github.com/BurntSushi/toml/type_check.go b/vendor/github.com/BurntSushi/toml/type_check.go
new file mode 100644
index 0000000000..d56aa80faf
--- /dev/null
+++ b/vendor/github.com/BurntSushi/toml/type_check.go
@@ -0,0 +1,70 @@
+package toml
+
+// tomlType represents any Go type that corresponds to a TOML type.
+// While the first draft of the TOML spec has a simplistic type system that
+// probably doesn't need this level of sophistication, we seem to be militating
+// toward adding real composite types.
+type tomlType interface {
+	typeString() string
+}
+
+// typeEqual accepts any two types and returns true if they are equal.
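replaceEscapes above is where the escape grammar admitted by lexStringEscape actually becomes runes, including the \uXXXX/\UXXXXXXXX forms validated by asciiEscapeToUnicode, and it runs after stripEscapedNewlines has folded backslash-newline continuations in multiline strings. A sketch of both effects through the public API:

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct{ S, T string }
	doc := `
s = "caf\u00E9\n"
t = """a \
      b"""
`
	if _, err := toml.Decode(doc, &v); err != nil {
		panic(err)
	}
	// The \u escape becomes é; the escaped newline and the indentation
	// of the continuation line are folded away.
	fmt.Printf("%q %q\n", v.S, v.T) // "café\n" "a b"
}
```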
+func typeEqual(t1, t2 tomlType) bool { + if t1 == nil || t2 == nil { + return false + } + return t1.typeString() == t2.typeString() +} + +func typeIsHash(t tomlType) bool { + return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) +} + +type tomlBaseType string + +func (btype tomlBaseType) typeString() string { + return string(btype) +} + +func (btype tomlBaseType) String() string { + return btype.typeString() +} + +var ( + tomlInteger tomlBaseType = "Integer" + tomlFloat tomlBaseType = "Float" + tomlDatetime tomlBaseType = "Datetime" + tomlString tomlBaseType = "String" + tomlBool tomlBaseType = "Bool" + tomlArray tomlBaseType = "Array" + tomlHash tomlBaseType = "Hash" + tomlArrayHash tomlBaseType = "ArrayHash" +) + +// typeOfPrimitive returns a tomlType of any primitive value in TOML. +// Primitive values are: Integer, Float, Datetime, String and Bool. +// +// Passing a lexer item other than the following will cause a BUG message +// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. +func (p *parser) typeOfPrimitive(lexItem item) tomlType { + switch lexItem.typ { + case itemInteger: + return tomlInteger + case itemFloat: + return tomlFloat + case itemDatetime: + return tomlDatetime + case itemString: + return tomlString + case itemMultilineString: + return tomlString + case itemRawString: + return tomlString + case itemRawMultilineString: + return tomlString + case itemBool: + return tomlBool + } + p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) + panic("unreachable") +} diff --git a/vendor/github.com/BurntSushi/toml/type_fields.go b/vendor/github.com/BurntSushi/toml/type_fields.go new file mode 100644 index 0000000000..608997c22f --- /dev/null +++ b/vendor/github.com/BurntSushi/toml/type_fields.go @@ -0,0 +1,242 @@ +package toml + +// Struct field handling is adapted from code in encoding/json: +// +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the Go distribution. + +import ( + "reflect" + "sort" + "sync" +) + +// A field represents a single field found in a struct. +type field struct { + name string // the name of the field (`toml` tag included) + tag bool // whether field has a `toml` tag + index []int // represents the depth of an anonymous field + typ reflect.Type // the type of the field +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from toml tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that TOML should recognize for the given +// type. 
The algorithm is breadth-first search over the set of structs to +// include - the top struct and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + opts := getOptions(sf.Tag) + if opts.skip { + continue + } + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. + if opts.name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { + tagged := opts.name != "" + name := opts.name + if name == "" { + name = sf.Name + } + fields = append(fields, field{name, tagged, index, ft}) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + f := field{name: ft.Name(), index: index, typ: ft} + next = append(next, f) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with TOML tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. + fi := fields[i] + name := fi.name + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != name { + break + } + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// TOML tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. 
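The BFS plus dominantField machinery mirrors encoding/json's embedding rules: a shallower field hides a deeper one with the same name, and at equal depth a `toml`-tagged field wins (two equal, equally-tagged peers cancel each other out). A sketch of the shallow-beats-deep case (types invented for illustration):

```go
package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

type Base struct {
	Name string
}

type Server struct {
	Base
	Name string // depth 0 hides the embedded Base.Name at depth 1
}

func main() {
	var s Server
	if _, err := toml.Decode(`name = "edge-1"`, &s); err != nil {
		panic(err)
	}
	fmt.Printf("%q %q\n", s.Name, s.Base.Name) // "edge-1" ""
}
```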
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
diff --git a/vendor/github.com/MakeNowJust/heredoc/LICENSE b/vendor/github.com/MakeNowJust/heredoc/LICENSE
new file mode 100644
index 0000000000..8a58c22208
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014-2017 TSUYUSATO Kitsune
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/MakeNowJust/heredoc/README.md b/vendor/github.com/MakeNowJust/heredoc/README.md
new file mode 100644
index 0000000000..a3a65faba1
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/README.md
@@ -0,0 +1,53 @@
+# heredoc [![CircleCI](https://circleci.com/gh/MakeNowJust/heredoc.svg?style=svg)](https://circleci.com/gh/MakeNowJust/heredoc) [![Go Walker](http://gowalker.org/api/v1/badge)](https://gowalker.org/github.com/MakeNowJust/heredoc)
+
+## About
+
+Package heredoc provides here-documents from raw strings, stripping the common leading indentation.
+
+## Install
+
+```console
+$ go get github.com/MakeNowJust/heredoc
+```
+
+## Import
+
+```go
+// usual
+import "github.com/MakeNowJust/heredoc"
+// shortcuts
+import . "github.com/MakeNowJust/heredoc/dot"
+```
+
+## Example
+
+```go
+package main
+
+import (
+	"fmt"
+	. "github.com/MakeNowJust/heredoc/dot"
+)
+
+func main() {
+	fmt.Println(D(`
+		Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+		sed do eiusmod tempor incididunt ut labore et dolore magna
+		aliqua. Ut enim ad minim veniam, ...
+	`))
+	// Output:
+	// Lorem ipsum dolor sit amet, consectetur adipisicing elit,
+	// sed do eiusmod tempor incididunt ut labore et dolore magna
+	// aliqua. Ut enim ad minim veniam, ...
+	//
+}
+```
+
+## API Document
+
+ - [Go Walker - github.com/MakeNowJust/heredoc](https://gowalker.org/github.com/MakeNowJust/heredoc)
+ - [Go Walker - github.com/MakeNowJust/heredoc/dot](https://gowalker.org/github.com/MakeNowJust/heredoc/dot)
+
+## License
+
+This software is released under the MIT License, see LICENSE.
diff --git a/vendor/github.com/MakeNowJust/heredoc/heredoc.go b/vendor/github.com/MakeNowJust/heredoc/heredoc.go
new file mode 100644
index 0000000000..fea12e622f
--- /dev/null
+++ b/vendor/github.com/MakeNowJust/heredoc/heredoc.go
@@ -0,0 +1,98 @@
+// Copyright (c) 2014-2017 TSUYUSATO Kitsune
+// This software is released under the MIT License.
+// http://opensource.org/licenses/mit-license.php
+
+// Package heredoc provides creation of here-documents from raw strings.
+//
+// Golang supports raw-string syntax.
+//     doc := `
+//         Foo
+//         Bar
+//     `
+// But raw-string cannot recognize indentation. Thus such content is an
+// indented string, equivalent to
+//     "\n\tFoo\n\tBar\n"
+// I don't want this!
+//
+// However this problem is solved by package heredoc.
+//     doc := heredoc.Doc(`
+//         Foo
+//         Bar
+//     `)
+// Is equivalent to
+//     "Foo\nBar\n"
+package heredoc
+
+import (
+	"fmt"
+	"strings"
+	"unicode"
+)
+
+const maxInt = int(^uint(0) >> 1)
+
+// Doc returns un-indented string as here-document.
+func Doc(raw string) string {
+	skipFirstLine := false
+	if raw[0] == '\n' {
+		raw = raw[1:]
+	} else {
+		skipFirstLine = true
+	}
+
+	lines := strings.Split(raw, "\n")
+
+	minIndentSize := getMinIndent(lines, skipFirstLine)
+	lines = removeIndentation(lines, minIndentSize, skipFirstLine)
+
+	return strings.Join(lines, "\n")
+}
+
+// getMinIndent calculates the minimum indentation in lines, excluding empty lines.
+func getMinIndent(lines []string, skipFirstLine bool) int {
+	minIndentSize := maxInt
+
+	for i, line := range lines {
+		if i == 0 && skipFirstLine {
+			continue
+		}
+
+		indentSize := 0
+		for _, r := range []rune(line) {
+			if unicode.IsSpace(r) {
+				indentSize += 1
+			} else {
+				break
+			}
+		}
+
+		if len(line) == indentSize {
+			if i == len(lines)-1 && indentSize < minIndentSize {
+				lines[i] = ""
+			}
+		} else if indentSize < minIndentSize {
+			minIndentSize = indentSize
+		}
+	}
+	return minIndentSize
+}
+
+// removeIndentation removes n characters from the front of each line in lines.
+// Skips first line if skipFirstLine is true, skips empty lines.
+func removeIndentation(lines []string, n int, skipFirstLine bool) []string {
+	for i, line := range lines {
+		if i == 0 && skipFirstLine {
+			continue
+		}
+
+		if len(lines[i]) >= n {
+			lines[i] = line[n:]
+		}
+	}
+	return lines
+}
+
+// Docf returns unindented and formatted string as here-document.
+// Formatting is done as for fmt.Printf().
+func Docf(raw string, args ...interface{}) string {
+	return fmt.Sprintf(Doc(raw), args...)
+} diff --git a/vendor/github.com/Masterminds/goutils/.travis.yml b/vendor/github.com/Masterminds/goutils/.travis.yml new file mode 100644 index 0000000000..4025e01ec4 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/.travis.yml @@ -0,0 +1,18 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +script: + - go test -v + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/06e3328629952dabe3e0 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: never # options: [always|never|change] default: always diff --git a/vendor/github.com/Masterminds/goutils/CHANGELOG.md b/vendor/github.com/Masterminds/goutils/CHANGELOG.md new file mode 100644 index 0000000000..d700ec47f2 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/CHANGELOG.md @@ -0,0 +1,8 @@ +# 1.0.1 (2017-05-31) + +## Fixed +- #21: Fix generation of alphanumeric strings (thanks @dbarranco) + +# 1.0.0 (2014-04-30) + +- Initial release. diff --git a/vendor/github.com/Masterminds/goutils/LICENSE.txt b/vendor/github.com/Masterminds/goutils/LICENSE.txt new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/Masterminds/goutils/README.md b/vendor/github.com/Masterminds/goutils/README.md new file mode 100644 index 0000000000..163ffe72a8 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/README.md @@ -0,0 +1,70 @@ +GoUtils +=========== +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +[![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) [![Build Status](https://travis-ci.org/Masterminds/goutils.svg?branch=master)](https://travis-ci.org/Masterminds/goutils) [![Build status](https://ci.appveyor.com/api/projects/status/sc2b1ew0m7f0aiju?svg=true)](https://ci.appveyor.com/project/mattfarina/goutils) + + +GoUtils provides users with utility functions to manipulate strings in various ways. 
It is a Go implementation of some +string manipulation libraries of Java Apache Commons. GoUtils includes the following Java Apache Commons classes: +* WordUtils +* RandomStringUtils +* StringUtils (partial implementation) + +## Installation +If you have Go set up on your system, from the GOPATH directory within the command line/terminal, enter this: + + go get github.com/Masterminds/goutils + +If you do not have Go set up on your system, please follow the [Go installation directions from the documentation](http://golang.org/doc/install), and then follow the instructions above to install GoUtils. + + +## Documentation +GoUtils doc is available here: [![GoDoc](https://godoc.org/github.com/Masterminds/goutils?status.png)](https://godoc.org/github.com/Masterminds/goutils) + + +## Usage +The code snippets below show examples of how to use GoUtils. Some functions return errors while others do not. The first instance below, which does not return an error, is the `Initials` function (located within the `wordutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + } +Some functions return errors mainly due to illegal arguments used as parameters. The code example below illustrates how to deal with a function that returns an error. In this instance, the function is the `Random` function (located within the `randomstringutils.go` file). + + package main + + import ( + "fmt" + "github.com/Masterminds/goutils" + ) + + func main() { + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + + } + +## License +GoUtils is licensed under the Apache License, Version 2.0. Please check the LICENSE.txt file or visit http://www.apache.org/licenses/LICENSE-2.0 for a copy of the license. + +## Issue Reporting +Make suggestions or report issues using the Git issue tracker: https://github.com/Masterminds/goutils/issues + +## Website +* [GoUtils webpage](http://Masterminds.github.io/goutils/) diff --git a/vendor/github.com/Masterminds/goutils/appveyor.yml b/vendor/github.com/Masterminds/goutils/appveyor.yml new file mode 100644 index 0000000000..657564a847 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/appveyor.yml @@ -0,0 +1,21 @@ +version: build-{build}.{branch} + +clone_folder: C:\gopath\src\github.com\Masterminds\goutils +shallow_clone: true + +environment: + GOPATH: C:\gopath + +platform: + - x64 + +build: off + +install: + - go version + - go env + +test_script: + - go test -v + +deploy: off diff --git a/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go new file mode 100644 index 0000000000..8dbd924858 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/cryptorandomstringutils.go @@ -0,0 +1,230 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License.
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "unicode" +) + +/* +CryptoRandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNonAlphaNumeric(count int) (string, error) { + return CryptoRandomAlphaNumericCustom(count, false, false) +} + +/* +CryptoRandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAscii(count int) (string, error) { + return CryptoRandom(count, 32, 127, false, false) +} + +/* +CryptoRandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, false, true) +} + +/* +CryptoRandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphabetic(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, false) +} + +/* +CryptoRandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumeric(count int) (string, error) { + return CryptoRandom(count, 0, 0, true, true) +} + +/* +CryptoRandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. 
+ +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, CryptoRandom(...) +*/ +func CryptoRandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return CryptoRandom(count, 0, 0, letters, numbers) +} + +/* +CryptoRandom creates a random string based on a variety of options, using using golang's crypto/rand source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, +unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func CryptoRandom(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(getCryptoRandomInt(gap) + int64(start)) + } else { + ch = chars[getCryptoRandomInt(gap)+int64(start)] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + getCryptoRandomInt(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + getCryptoRandomInt(128)) + 
count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} + +func getCryptoRandomInt(count int) int64 { + nBig, err := rand.Int(rand.Reader, big.NewInt(int64(count))) + if err != nil { + panic(err) + } + return nBig.Int64() +} diff --git a/vendor/github.com/Masterminds/goutils/randomstringutils.go b/vendor/github.com/Masterminds/goutils/randomstringutils.go new file mode 100644 index 0000000000..272670231a --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/randomstringutils.go @@ -0,0 +1,248 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "fmt" + "math" + "math/rand" + "time" + "unicode" +) + +// RANDOM provides the time-based seed used to generate random numbers +var RANDOM = rand.New(rand.NewSource(time.Now().UnixNano())) + +/* +RandomNonAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of all characters (ASCII/Unicode values between 0 to 2,147,483,647 (math.MaxInt32)). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNonAlphaNumeric(count int) (string, error) { + return RandomAlphaNumericCustom(count, false, false) +} + +/* +RandomAscii creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of characters whose ASCII value is between 32 and 126 (inclusive). + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAscii(count int) (string, error) { + return Random(count, 32, 127, false, false) +} + +/* +RandomNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomNumeric(count int) (string, error) { + return Random(count, 0, 0, false, true) +} + +/* +RandomAlphabetic creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alphabetic characters. + +Parameters: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) 
+*/ +func RandomAlphabetic(count int) (string, error) { + return Random(count, 0, 0, true, false) +} + +/* +RandomAlphaNumeric creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters. + +Parameter: + count - the length of random string to create + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumeric(count int) (string, error) { + return Random(count, 0, 0, true, true) +} + +/* +RandomAlphaNumericCustom creates a random string whose length is the number of characters specified. +Characters will be chosen from the set of alpha-numeric characters as indicated by the arguments. + +Parameters: + count - the length of random string to create + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func RandomAlphaNumericCustom(count int, letters bool, numbers bool) (string, error) { + return Random(count, 0, 0, letters, numbers) +} + +/* +Random creates a random string based on a variety of options, using default source of randomness. +This method has exactly the same semantics as RandomSeed(int, int, int, bool, bool, []char, *rand.Rand), but +instead of using an externally supplied source of randomness, it uses the internal *rand.Rand instance. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode int) to start at + end - the position in set of chars (ASCII/Unicode int) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + +Returns: + string - the random string + error - an error stemming from an invalid parameter within underlying function, RandomSeed(...) +*/ +func Random(count int, start int, end int, letters bool, numbers bool, chars ...rune) (string, error) { + return RandomSeed(count, start, end, letters, numbers, chars, RANDOM) +} + +/* +RandomSeed creates a random string based on a variety of options, using supplied source of randomness. +If the parameters start and end are both 0, start and end are set to ' ' and 'z', the ASCII printable characters, will be used, +unless letters and numbers are both false, in which case, start and end are set to 0 and math.MaxInt32, respectively. +If chars is not nil, characters stored in chars that are between start and end are chosen. +This method accepts a user-supplied *rand.Rand instance to use as a source of randomness. By seeding a single *rand.Rand instance +with a fixed seed and using it for each call, the same random sequence of strings can be generated repeatedly and predictably. + +Parameters: + count - the length of random string to create + start - the position in set of chars (ASCII/Unicode decimals) to start at + end - the position in set of chars (ASCII/Unicode decimals) to end before + letters - if true, generated string may include alphabetic characters + numbers - if true, generated string may include numeric characters + chars - the set of chars to choose randoms from. If nil, then it will use the set of all chars. + random - a source of randomness. 
+ +Returns: + string - the random string + error - an error stemming from invalid parameters: if count < 0; or the provided chars array is empty; or end <= start; or end > len(chars) +*/ +func RandomSeed(count int, start int, end int, letters bool, numbers bool, chars []rune, random *rand.Rand) (string, error) { + + if count == 0 { + return "", nil + } else if count < 0 { + err := fmt.Errorf("randomstringutils illegal argument: Requested random string length %v is less than 0.", count) // equiv to err := errors.New("...") + return "", err + } + if chars != nil && len(chars) == 0 { + err := fmt.Errorf("randomstringutils illegal argument: The chars array must not be empty") + return "", err + } + + if start == 0 && end == 0 { + if chars != nil { + end = len(chars) + } else { + if !letters && !numbers { + end = math.MaxInt32 + } else { + end = 'z' + 1 + start = ' ' + } + } + } else { + if end <= start { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) must be greater than start (%v)", end, start) + return "", err + } + + if chars != nil && end > len(chars) { + err := fmt.Errorf("randomstringutils illegal argument: Parameter end (%v) cannot be greater than len(chars) (%v)", end, len(chars)) + return "", err + } + } + + buffer := make([]rune, count) + gap := end - start + + // high-surrogates range, (\uD800-\uDBFF) = 55296 - 56319 + // low-surrogates range, (\uDC00-\uDFFF) = 56320 - 57343 + + for count != 0 { + count-- + var ch rune + if chars == nil { + ch = rune(random.Intn(gap) + start) + } else { + ch = chars[random.Intn(gap)+start] + } + + if letters && unicode.IsLetter(ch) || numbers && unicode.IsDigit(ch) || !letters && !numbers { + if ch >= 56320 && ch <= 57343 { // low surrogate range + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = ch + count-- + // Insert high surrogate + buffer[count] = rune(55296 + random.Intn(128)) + } + } else if ch >= 55296 && ch <= 56191 { // High surrogates range (Partial) + if count == 0 { + count++ + } else { + // Insert low surrogate + buffer[count] = rune(56320 + random.Intn(128)) + count-- + // Insert high surrogate + buffer[count] = ch + } + } else if ch >= 56192 && ch <= 56319 { + // private high surrogate, skip it + count++ + } else { + // not one of the surrogates* + buffer[count] = ch + } + } else { + count++ + } + } + return string(buffer), nil +} diff --git a/vendor/github.com/Masterminds/goutils/stringutils.go b/vendor/github.com/Masterminds/goutils/stringutils.go new file mode 100644 index 0000000000..741bb530e8 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/stringutils.go @@ -0,0 +1,240 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package goutils + +import ( + "bytes" + "fmt" + "strings" + "unicode" +) + +// Typically returned by functions where a searched item cannot be found +const INDEX_NOT_FOUND = -1 + +/* +Abbreviate abbreviates a string using ellipses. 
This will turn the string "Now is the time for all good men" into "Now is the time for..." + +Specifically, the algorithm is as follows: + + - If str is less than maxWidth characters long, return it. + - Else abbreviate it to (str[0:maxWidth - 3] + "..."). + - If maxWidth is less than 4, return an illegal argument error. + - In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func Abbreviate(str string, maxWidth int) (string, error) { + return AbbreviateFull(str, 0, maxWidth) +} + +/* +AbbreviateFull abbreviates a string using ellipses. This will turn the string "Now is the time for all good men" into "...is the time for..." +This function works like Abbreviate(string, int), but allows you to specify a "left edge" offset. Note that this left edge is not +necessarily going to be the leftmost character in the result, or the first character following the ellipses, but it will appear +somewhere in the result. +In no case will it return a string of length greater than maxWidth. + +Parameters: + str - the string to check + offset - left edge of source string + maxWidth - maximum length of result string, must be at least 4 + +Returns: + string - abbreviated string + error - if the width is too small +*/ +func AbbreviateFull(str string, offset int, maxWidth int) (string, error) { + if str == "" { + return "", nil + } + if maxWidth < 4 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width is 4") + return "", err + } + if len(str) <= maxWidth { + return str, nil + } + if offset > len(str) { + offset = len(str) + } + if len(str)-offset < (maxWidth - 3) { // 15 - 5 < 10 - 3 = 10 < 7 + offset = len(str) - (maxWidth - 3) + } + abrevMarker := "..." + if offset <= 4 { + return str[0:maxWidth-3] + abrevMarker, nil // str.substring(0, maxWidth - 3) + abrevMarker; + } + if maxWidth < 7 { + err := fmt.Errorf("stringutils illegal argument: Minimum abbreviation width with offset is 7") + return "", err + } + if (offset + maxWidth - 3) < len(str) { // 5 + (10-3) < 15 = 12 < 15 + abrevStr, _ := Abbreviate(str[offset:len(str)], (maxWidth - 3)) + return abrevMarker + abrevStr, nil // abrevMarker + abbreviate(str.substring(offset), maxWidth - 3); + } + return abrevMarker + str[(len(str)-(maxWidth-3)):len(str)], nil // abrevMarker + str.substring(str.length() - (maxWidth - 3)); +} + +/* +DeleteWhiteSpace deletes all whitespaces from a string as defined by unicode.IsSpace(rune). +It returns the string without whitespaces. + +Parameter: + str - the string to delete whitespace from, may be nil + +Returns: + the string without whitespaces +*/ +func DeleteWhiteSpace(str string) string { + if str == "" { + return str + } + sz := len(str) + var chs bytes.Buffer + count := 0 + for i := 0; i < sz; i++ { + ch := rune(str[i]) + if !unicode.IsSpace(ch) { + chs.WriteRune(ch) + count++ + } + } + if count == sz { + return str + } + return chs.String() +} + +/* +IndexOfDifference compares two strings, and returns the index at which the strings begin to differ. 
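+For example (illustrative): IndexOfDifference("foobar", "foofoo") returns 3.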
+ +Parameters: + str1 - the first string + str2 - the second string + +Returns: + the index where str1 and str2 begin to differ; -1 if they are equal +*/ +func IndexOfDifference(str1 string, str2 string) int { + if str1 == str2 { + return INDEX_NOT_FOUND + } + if IsEmpty(str1) || IsEmpty(str2) { + return 0 + } + var i int + for i = 0; i < len(str1) && i < len(str2); i++ { + if rune(str1[i]) != rune(str2[i]) { + break + } + } + if i < len(str2) || i < len(str1) { + return i + } + return INDEX_NOT_FOUND +} + +/* +IsBlank checks if a string is whitespace or empty (""). Observe the following behavior: + + goutils.IsBlank("") = true + goutils.IsBlank(" ") = true + goutils.IsBlank("bob") = false + goutils.IsBlank(" bob ") = false + +Parameter: + str - the string to check + +Returns: + true - if the string is whitespace or empty ("") +*/ +func IsBlank(str string) bool { + strLen := len(str) + if str == "" || strLen == 0 { + return true + } + for i := 0; i < strLen; i++ { + if unicode.IsSpace(rune(str[i])) == false { + return false + } + } + return true +} + +/* +IndexOf returns the index of the first instance of sub in str, with the search beginning from the +index start point specified. -1 is returned if sub is not present in str. + +An empty string ("") will return -1 (INDEX_NOT_FOUND). A negative start position is treated as zero. +A start position greater than the string length returns -1. + +Parameters: + str - the string to check + sub - the substring to find + start - the start position; negative treated as zero + +Returns: + the first index where the sub string was found (always >= start) +*/ +func IndexOf(str string, sub string, start int) int { + + if start < 0 { + start = 0 + } + + if len(str) < start { + return INDEX_NOT_FOUND + } + + if IsEmpty(str) || IsEmpty(sub) { + return INDEX_NOT_FOUND + } + + partialIndex := strings.Index(str[start:len(str)], sub) + if partialIndex == -1 { + return INDEX_NOT_FOUND + } + return partialIndex + start +} + +// IsEmpty checks if a string is empty (""). Returns true if empty, and false otherwise. +func IsEmpty(str string) bool { + return len(str) == 0 +} + +// Returns either the passed in string, or if the string is empty, the value of defaultStr. +func DefaultString(str string, defaultStr string) string { + if IsEmpty(str) { + return defaultStr + } + return str +} + +// Returns either the passed in string, or if the string is whitespace, empty (""), the value of defaultStr. +func DefaultIfBlank(str string, defaultStr string) string { + if IsBlank(str) { + return defaultStr + } + return str +} diff --git a/vendor/github.com/Masterminds/goutils/wordutils.go b/vendor/github.com/Masterminds/goutils/wordutils.go new file mode 100644 index 0000000000..034cad8e21 --- /dev/null +++ b/vendor/github.com/Masterminds/goutils/wordutils.go @@ -0,0 +1,357 @@ +/* +Copyright 2014 Alexander Okoli + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +/* +Package goutils provides utility functions to manipulate strings in various ways. 
+The code snippets below show examples of how to use goutils. Some functions return +errors while others do not, so usage would vary as a result. + +Example: + + package main + + import ( + "fmt" + "github.com/aokoli/goutils" + ) + + func main() { + + // EXAMPLE 1: A goutils function which returns no errors + fmt.Println (goutils.Initials("John Doe Foo")) // Prints out "JDF" + + + + // EXAMPLE 2: A goutils function which returns an error + rand1, err1 := goutils.Random (-1, 0, 0, true, true) + + if err1 != nil { + fmt.Println(err1) // Prints out error message because -1 was entered as the first parameter in goutils.Random(...) + } else { + fmt.Println(rand1) + } + } +*/ +package goutils + +import ( + "bytes" + "strings" + "unicode" +) + +// VERSION indicates the current version of goutils +const VERSION = "1.0.0" + +/* +Wrap wraps a single line of text, identifying words by ' '. +New lines will be separated by '\n'. Very long words, such as URLs will not be wrapped. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + +Returns: + a line with newlines inserted +*/ +func Wrap(str string, wrapLength int) string { + return WrapCustom(str, wrapLength, "", false) +} + +/* +WrapCustom wraps a single line of text, identifying words by ' '. +Leading spaces on a new line are stripped. Trailing spaces are not stripped. + +Parameters: + str - the string to be word wrapped + wrapLength - the column number (a column can fit only one character) to wrap the words at, less than 1 is treated as 1 + newLineStr - the string to insert for a new line, "" uses '\n' + wrapLongWords - true if long words (such as URLs) should be wrapped + +Returns: + a line with newlines inserted +*/ +func WrapCustom(str string, wrapLength int, newLineStr string, wrapLongWords bool) string { + + if str == "" { + return "" + } + if newLineStr == "" { + newLineStr = "\n" // TODO Assumes "\n" is seperator. Explore SystemUtils.LINE_SEPARATOR from Apache Commons + } + if wrapLength < 1 { + wrapLength = 1 + } + + inputLineLength := len(str) + offset := 0 + + var wrappedLine bytes.Buffer + + for inputLineLength-offset > wrapLength { + + if rune(str[offset]) == ' ' { + offset++ + continue + } + + end := wrapLength + offset + 1 + spaceToWrapAt := strings.LastIndex(str[offset:end], " ") + offset + + if spaceToWrapAt >= offset { + // normal word (not longer than wrapLength) + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + + } else { + // long word or URL + if wrapLongWords { + end := wrapLength + offset + // long words are wrapped one line at a time + wrappedLine.WriteString(str[offset:end]) + wrappedLine.WriteString(newLineStr) + offset += wrapLength + } else { + // long words aren't wrapped, just extended beyond limit + end := wrapLength + offset + index := strings.IndexRune(str[end:len(str)], ' ') + if index == -1 { + wrappedLine.WriteString(str[offset:len(str)]) + offset = inputLineLength + } else { + spaceToWrapAt = index + end + wrappedLine.WriteString(str[offset:spaceToWrapAt]) + wrappedLine.WriteString(newLineStr) + offset = spaceToWrapAt + 1 + } + } + } + } + + wrappedLine.WriteString(str[offset:len(str)]) + + return wrappedLine.String() + +} + +/* +Capitalize capitalizes all the delimiter separated words in a string. Only the first letter of each word is changed. 
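+For example (illustrative): Capitalize("hello world") returns "Hello World".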
+To convert the rest of each word to lowercase at the same time, use CapitalizeFully(str string, delimiters ...rune). +The delimiters represent a set of characters understood to separate words. The first string character +and the first non-delimiter character after a delimiter will be capitalized. A "" input string returns "". +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func Capitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + capitalizeNext := true + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + capitalizeNext = true + } else if capitalizeNext { + buffer[i] = unicode.ToTitle(ch) + capitalizeNext = false + } + } + return string(buffer) + +} + +/* +CapitalizeFully converts all the delimiter separated words in a string into capitalized words, that is each word is made up of a +titlecase character and then a series of lowercase characters. The delimiters represent a set of characters understood +to separate words. The first string character and the first non-delimiter character after a delimiter will be capitalized. +Capitalization uses the Unicode title case, normally equivalent to upper case. + +Parameters: + str - the string to capitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + capitalized string +*/ +func CapitalizeFully(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + str = strings.ToLower(str) + return Capitalize(str, delimiters...) +} + +/* +Uncapitalize uncapitalizes all the whitespace separated words in a string. Only the first letter of each word is changed. +The delimiters represent a set of characters understood to separate words. The first string character and the first non-delimiter +character after a delimiter will be uncapitalized. Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to uncapitalize fully + delimiters - set of characters to determine capitalization, exclusion of this parameter means whitespace would be delimeter + +Returns: + uncapitalized string +*/ +func Uncapitalize(str string, delimiters ...rune) string { + + var delimLen int + + if delimiters == nil { + delimLen = -1 + } else { + delimLen = len(delimiters) + } + + if str == "" || delimLen == 0 { + return str + } + + buffer := []rune(str) + uncapitalizeNext := true // TODO Always makes capitalize/un apply to first char. + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if isDelimiter(ch, delimiters...) { + uncapitalizeNext = true + } else if uncapitalizeNext { + buffer[i] = unicode.ToLower(ch) + uncapitalizeNext = false + } + } + return string(buffer) +} + +/* +SwapCase swaps the case of a string using a word based algorithm. 
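+For example (illustrative): SwapCase("Hello World") returns "hELLO wORLD".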
+ +Conversion algorithm: + + Upper case character converts to Lower case + Title case character converts to Lower case + Lower case character after Whitespace or at start converts to Title case + Other Lower case character converts to Upper case + Whitespace is defined by unicode.IsSpace(char). + +Parameters: + str - the string to swap case + +Returns: + the changed string +*/ +func SwapCase(str string) string { + if str == "" { + return str + } + buffer := []rune(str) + + whitespace := true + + for i := 0; i < len(buffer); i++ { + ch := buffer[i] + if unicode.IsUpper(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsTitle(ch) { + buffer[i] = unicode.ToLower(ch) + whitespace = false + } else if unicode.IsLower(ch) { + if whitespace { + buffer[i] = unicode.ToTitle(ch) + whitespace = false + } else { + buffer[i] = unicode.ToUpper(ch) + } + } else { + whitespace = unicode.IsSpace(ch) + } + } + return string(buffer) +} + +/* +Initials extracts the initial letters from each word in the string. The first letter of the string and all first +letters after the defined delimiters are returned as a new string. Their case is not changed. If the delimiters +parameter is excluded, then Whitespace is used. Whitespace is defined by unicode.IsSpacea(char). An empty delimiter array returns an empty string. + +Parameters: + str - the string to get initials from + delimiters - set of characters to determine words, exclusion of this parameter means whitespace would be delimeter +Returns: + string of initial letters +*/ +func Initials(str string, delimiters ...rune) string { + if str == "" { + return str + } + if delimiters != nil && len(delimiters) == 0 { + return "" + } + strLen := len(str) + var buf bytes.Buffer + lastWasGap := true + for i := 0; i < strLen; i++ { + ch := rune(str[i]) + + if isDelimiter(ch, delimiters...) 
{ + lastWasGap = true + } else if lastWasGap { + buf.WriteRune(ch) + lastWasGap = false + } + } + return buf.String() +} + +// private function (lower case func name) +func isDelimiter(ch rune, delimiters ...rune) bool { + if delimiters == nil { + return unicode.IsSpace(ch) + } + for _, delimiter := range delimiters { + if ch == delimiter { + return true + } + } + return false +} diff --git a/vendor/github.com/Masterminds/semver/v3/.gitignore b/vendor/github.com/Masterminds/semver/v3/.gitignore new file mode 100644 index 0000000000..6b061e6174 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.gitignore @@ -0,0 +1 @@ +_fuzz/ \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/.golangci.yml b/vendor/github.com/Masterminds/semver/v3/.golangci.yml new file mode 100644 index 0000000000..fdbdf1448c --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/.golangci.yml @@ -0,0 +1,26 @@ +run: + deadline: 2m + +linters: + disable-all: true + enable: + - deadcode + - dupl + - errcheck + - gofmt + - goimports + - golint + - gosimple + - govet + - ineffassign + - misspell + - nakedret + - structcheck + - unused + - varcheck + +linters-settings: + gofmt: + simplify: true + dupl: + threshold: 400 diff --git a/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md new file mode 100644 index 0000000000..1f90c38d26 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/CHANGELOG.md @@ -0,0 +1,194 @@ +# Changelog + +## 3.1.1 (2020-11-23) + +### Fixed + +- #158: Fixed issue with generated regex operation order that could cause problem + +## 3.1.0 (2020-04-15) + +### Added + +- #131: Add support for serializing/deserializing SQL (thanks @ryancurrah) + +### Changed + +- #148: More accurate validation messages on constraints + +## 3.0.3 (2019-12-13) + +### Fixed + +- #141: Fixed issue with <= comparison + +## 3.0.2 (2019-11-14) + +### Fixed + +- #134: Fixed broken constraint checking with ^0.0 (thanks @krmichelos) + +## 3.0.1 (2019-09-13) + +### Fixed + +- #125: Fixes issue with module path for v3 + +## 3.0.0 (2019-09-12) + +This is a major release of the semver package which includes API changes. The Go +API is compatible with ^1. The Go API was not changed because many people are using +`go get` without Go modules for their applications and API breaking changes cause +errors which we have or would need to support. + +The changes in this release are the handling based on the data passed into the +functions. These are described in the added and changed sections below. + +### Added + +- StrictNewVersion function. This is similar to NewVersion but will return an + error if the version passed in is not a strict semantic version. For example, + 1.2.3 would pass but v1.2.3 or 1.2 would fail because they are not strictly + speaking semantic versions. This function is faster, performs fewer operations, + and uses fewer allocations than NewVersion. +- Fuzzing has been performed on NewVersion, StrictNewVersion, and NewConstraint. + The Makefile contains the operations used. For more information on you can start + on Wikipedia at https://en.wikipedia.org/wiki/Fuzzing +- Now using Go modules + +### Changed + +- NewVersion has proper prerelease and metadata validation with error messages + to signal an issue with either of them +- ^ now operates using a similar set of rules to npm/js and Rust/Cargo. 
If the + version is >=1 the ^ ranges works the same as v1. For major versions of 0 the + rules have changed. The minor version is treated as the stable version unless + a patch is specified and then it is equivalent to =. One difference from npm/js + is that prereleases there are only to a specific version (e.g. 1.2.3). + Prereleases here look over multiple versions and follow semantic version + ordering rules. This pattern now follows along with the expected and requested + handling of this packaged by numerous users. + +## 1.5.0 (2019-09-11) + +### Added + +- #103: Add basic fuzzing for `NewVersion()` (thanks @jesse-c) + +### Changed + +- #82: Clarify wildcard meaning in range constraints and update tests for it (thanks @greysteil) +- #83: Clarify caret operator range for pre-1.0.0 dependencies (thanks @greysteil) +- #72: Adding docs comment pointing to vert for a cli +- #71: Update the docs on pre-release comparator handling +- #89: Test with new go versions (thanks @thedevsaddam) +- #87: Added $ to ValidPrerelease for better validation (thanks @jeremycarroll) + +### Fixed + +- #78: Fix unchecked error in example code (thanks @ravron) +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case +- #97: Fixed copyright file for proper display on GitHub +- #107: Fix handling prerelease when sorting alphanum and num +- #109: Fixed where Validate sometimes returns wrong message on error + +## 1.4.2 (2018-04-10) + +### Changed + +- #72: Updated the docs to point to vert for a console appliaction +- #71: Update the docs on pre-release comparator handling + +### Fixed + +- #70: Fix the handling of pre-releases and the 0.0.0 release edge case + +## 1.4.1 (2018-04-02) + +### Fixed + +- Fixed #64: Fix pre-release precedence issue (thanks @uudashr) + +## 1.4.0 (2017-10-04) + +### Changed + +- #61: Update NewVersion to parse ints with a 64bit int size (thanks @zknill) + +## 1.3.1 (2017-07-10) + +### Fixed + +- Fixed #57: number comparisons in prerelease sometimes inaccurate + +## 1.3.0 (2017-05-02) + +### Added + +- #45: Added json (un)marshaling support (thanks @mh-cbon) +- Stability marker. See https://masterminds.github.io/stability/ + +### Fixed + +- #51: Fix handling of single digit tilde constraint (thanks @dgodd) + +### Changed + +- #55: The godoc icon moved from png to svg + +## 1.2.3 (2017-04-03) + +### Fixed + +- #46: Fixed 0.x.x and 0.0.x in constraints being treated as * + +## Release 1.2.2 (2016-12-13) + +### Fixed + +- #34: Fixed issue where hyphen range was not working with pre-release parsing. + +## Release 1.2.1 (2016-11-28) + +### Fixed + +- #24: Fixed edge case issue where constraint "> 0" does not handle "0.0.1-alpha" + properly. + +## Release 1.2.0 (2016-11-04) + +### Added + +- #20: Added MustParse function for versions (thanks @adamreese) +- #15: Added increment methods on versions (thanks @mh-cbon) + +### Fixed + +- Issue #21: Per the SemVer spec (section 9) a pre-release is unstable and + might not satisfy the intended compatibility. The change here ignores pre-releases + on constraint checks (e.g., ~ or ^) when a pre-release is not part of the + constraint. For example, `^1.2.3` will ignore pre-releases while + `^1.2.3-alpha` will include them. 
+ +## Release 1.1.1 (2016-06-30) + +### Changed + +- Issue #9: Speed up version comparison performance (thanks @sdboyer) +- Issue #8: Added benchmarks (thanks @sdboyer) +- Updated Go Report Card URL to new location +- Updated Readme to add code snippet formatting (thanks @mh-cbon) +- Updating tagging to v[SemVer] structure for compatibility with other tools. + +## Release 1.1.0 (2016-03-11) + +- Issue #2: Implemented validation to provide reasons a versions failed a + constraint. + +## Release 1.0.1 (2015-12-31) + +- Fixed #1: * constraint failing on valid versions. + +## Release 1.0.0 (2015-10-20) + +- Initial release diff --git a/vendor/github.com/Masterminds/semver/v3/LICENSE.txt b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt new file mode 100644 index 0000000000..9ff7da9c48 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (C) 2014-2019, Matt Butcher and Matt Farina + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/semver/v3/Makefile b/vendor/github.com/Masterminds/semver/v3/Makefile new file mode 100644 index 0000000000..eac19178fb --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/Makefile @@ -0,0 +1,37 @@ +GOPATH=$(shell go env GOPATH) +GOLANGCI_LINT=$(GOPATH)/bin/golangci-lint +GOFUZZBUILD = $(GOPATH)/bin/go-fuzz-build +GOFUZZ = $(GOPATH)/bin/go-fuzz + +.PHONY: lint +lint: $(GOLANGCI_LINT) + @echo "==> Linting codebase" + @$(GOLANGCI_LINT) run + +.PHONY: test +test: + @echo "==> Running tests" + GO111MODULE=on go test -v + +.PHONY: test-cover +test-cover: + @echo "==> Running Tests with coverage" + GO111MODULE=on go test -cover . + +.PHONY: fuzz +fuzz: $(GOFUZZBUILD) $(GOFUZZ) + @echo "==> Fuzz testing" + $(GOFUZZBUILD) + $(GOFUZZ) -workdir=_fuzz + +$(GOLANGCI_LINT): + # Install golangci-lint. 
The configuration for it is in the .golangci.yml + # file in the root of the repository + echo ${GOPATH} + curl -sfL https://install.goreleaser.com/github.com/golangci/golangci-lint.sh | sh -s -- -b $(GOPATH)/bin v1.17.1 + +$(GOFUZZBUILD): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz-build + +$(GOFUZZ): + cd / && go get -u github.com/dvyukov/go-fuzz/go-fuzz github.com/dvyukov/go-fuzz/go-fuzz-dep \ No newline at end of file diff --git a/vendor/github.com/Masterminds/semver/v3/README.md b/vendor/github.com/Masterminds/semver/v3/README.md new file mode 100644 index 0000000000..d8f54dcbd3 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/README.md @@ -0,0 +1,244 @@ +# SemVer + +The `semver` package provides the ability to work with [Semantic Versions](http://semver.org) in Go. Specifically it provides the ability to: + +* Parse semantic versions +* Sort semantic versions +* Check if a semantic version fits within a set of constraints +* Optionally work with a `v` prefix + +[![Stability: +Active](https://masterminds.github.io/stability/active.svg)](https://masterminds.github.io/stability/active.html) +[![](https://github.com/Masterminds/semver/workflows/Tests/badge.svg)](https://github.com/Masterminds/semver/actions) +[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/semver/v3) +[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/semver)](https://goreportcard.com/report/github.com/Masterminds/semver) + +If you are looking for a command line tool for version comparisons please see +[vert](https://github.com/Masterminds/vert) which uses this library. + +## Package Versions + +There are three major versions of the `semver` package. + +* 3.x.x is the new stable and active version. This version is focused on constraint + compatibility for range handling in other tools from other languages. It has + a similar API to the v1 releases. The development of this version is on the master + branch. The documentation for this version is below. +* 2.x was developed primarily for [dep](https://github.com/golang/dep). There are + no tagged releases and the development was performed by [@sdboyer](https://github.com/sdboyer). + There are API breaking changes from v1. This version lives on the [2.x branch](https://github.com/Masterminds/semver/tree/2.x). +* 1.x.x is the most widely used version with numerous tagged releases. This is the + previous stable and is still maintained for bug fixes. The development, to fix + bugs, occurs on the release-1 branch. You can read the documentation [here](https://github.com/Masterminds/semver/blob/release-1/README.md). + +## Parsing Semantic Versions + +There are two functions that can parse semantic versions. The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. `v1.2`) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue parsing the +version.
For example, + + v, err := semver.NewVersion("1.2.3-beta.1+build345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. Getting the original string is useful if the semantic version was coerced +into a valid form. + +## Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + +```go +raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} +vs := make([]*semver.Version, len(raw)) +for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + // Handle the error parsing the version. + } + + vs[i] = v +} + +sort.Sort(semver.Collection(vs)) +``` + +## Checking Version Constraints + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses `Constraints`. There are some important +differences to note between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer that is consistent with the + comparison section of the spec at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + range does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods of checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patterns with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + +```go +c, err := semver.NewConstraint(">= 1.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parsable. +} +// Check if the version meets the constraints. The a variable will be true. +a := c.Check(v) +``` + +### Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of space or comma separated AND comparisons. These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3.
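+ +For example, a short sketch of the AND/OR grouping in action (the version numbers here are only illustrative): + +```go +c, err := semver.NewConstraint(">= 1.2 < 3.0.0 || >= 4.2.3") +if err != nil { + // Handle constraint not being parsable. +} + +// 2.5.0 satisfies the left AND group and 4.3.0 the right one, while +// 3.1.0 satisfies neither. +_ = c.Check(semver.MustParse("2.5.0")) // true +_ = c.Check(semver.MustParse("4.3.0")) // true +_ = c.Check(semver.MustParse("3.1.0")) // false +```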
+ +The basic comparisons are: + +* `=`: equal (aliased to no operator) +* `!=`: not equal +* `>`: greater than +* `<`: less than +* `>=`: greater than or equal to +* `<=`: less than or equal to + +### Working With Prerelease Versions + +Pre-releases, for those not familiar with them, are used for software releases +prior to stable or generally available releases. Examples of prereleases include +development, alpha, beta, and release candidate releases. A prerelease may be +a version such as `1.2.3-beta.1` while the stable release would be `1.2.3`. In the +order of precedence, prereleases come before their associated releases. In this +example `1.2.3-beta.1 < 1.2.3`. + +According to the Semantic Version specification prereleases may not be +API compliant with their release counterpart. It says, + +> A pre-release version indicates that the version is unstable and might not satisfy the intended compatibility requirements as denoted by its associated normal version. + +SemVer comparisons using constraints without a prerelease comparator will skip +prerelease versions. For example, `>=1.2.3` will skip prereleases when looking +at a list of releases while `>=1.2.3-0` will evaluate and find prereleases. + +The reason for the `0` as a pre-release version in the example comparison is +because pre-releases can only contain ASCII alphanumerics and hyphens (along with +`.` separators), per the spec. Sorting happens in ASCII sort order, again per the +spec. The lowest character is a `0` in ASCII sort order +(see an [ASCII Table](http://www.asciitable.com/)). + +Understanding ASCII sort ordering is important because A-Z comes before a-z. That +means `>=1.2.3-BETA` will match `1.2.3-alpha`. What you might expect from case +sensitivity doesn't apply here. This is due to ASCII sort ordering which is what +the spec specifies. + +### Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphen ranges. +These look like: + +* `1.2 - 1.4.5` which is equivalent to `>= 1.2 <= 1.4.5` +* `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +### Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as wildcard characters. This works +for all comparison operators. When used on the `=` operator it falls +back to the patch level comparison (see tilde below). For example, + +* `1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `>= 1.2.x` is equivalent to `>= 1.2.0` +* `<= 2.x` is equivalent to `< 3` +* `*` is equivalent to `>= 0.0.0` + +### Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + +* `~1.2.3` is equivalent to `>= 1.2.3, < 1.3.0` +* `~1` is equivalent to `>= 1, < 2` +* `~2.3` is equivalent to `>= 2.3, < 2.4` +* `~1.2.x` is equivalent to `>= 1.2.0, < 1.3.0` +* `~1.x` is equivalent to `>= 1, < 2` + +### Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts +as the API stability level. This is useful when comparing API versions, as a +major change is API breaking.
For example, + +* `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` +* `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` +* `^2.3` is equivalent to `>= 2.3, < 3` +* `^2.x` is equivalent to `>= 2.0.0, < 3` +* `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` +* `^0.2` is equivalent to `>=0.2.0 <0.3.0` +* `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` +* `^0.0` is equivalent to `>=0.0.0 <0.1.0` +* `^0` is equivalent to `>=0.0.0 <1.0.0` + +## Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + +```go +c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") +if err != nil { + // Handle constraint not being parseable. +} + +v, err := semver.NewVersion("1.3") +if err != nil { + // Handle version not being parseable. +} + +// Validate a version against a constraint. +a, msgs := c.Validate(v) +// a is false +for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" +} +``` + +## Contribute + +If you find an issue or want to contribute please file an [issue](https://github.com/Masterminds/semver/issues) +or [create a pull request](https://github.com/Masterminds/semver/pulls). diff --git a/vendor/github.com/Masterminds/semver/v3/collection.go b/vendor/github.com/Masterminds/semver/v3/collection.go new file mode 100644 index 0000000000..a78235895f --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/collection.go @@ -0,0 +1,24 @@ +package semver + +// Collection is a collection of Version instances and implements the sort +// interface. See the sort package for more details. +// https://golang.org/pkg/sort/ +type Collection []*Version + +// Len returns the length of a collection. The number of Version instances +// on the slice. +func (c Collection) Len() int { + return len(c) +} + +// Less is needed for the sort interface to compare two Version objects on the +// slice. It checks if one is less than the other. +func (c Collection) Less(i, j int) bool { + return c[i].LessThan(c[j]) +} + +// Swap is needed for the sort interface to replace the Version objects +// at two different positions in the slice. +func (c Collection) Swap(i, j int) { + c[i], c[j] = c[j], c[i] +} diff --git a/vendor/github.com/Masterminds/semver/v3/constraints.go b/vendor/github.com/Masterminds/semver/v3/constraints.go new file mode 100644 index 0000000000..547613f044 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/constraints.go @@ -0,0 +1,568 @@ +package semver + +import ( + "bytes" + "errors" + "fmt" + "regexp" + "strings" +) + +// Constraints is one or more constraints that a semantic version can be +// checked against. +type Constraints struct { + constraints [][]*constraint +} + +// NewConstraint returns a Constraints instance that a Version instance can +// be checked against. If there is a parse error it will be returned. +func NewConstraint(c string) (*Constraints, error) { + + // Rewrite - ranges into a comparison operation.
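+ // For example, the hyphen range "1.2 - 1.4.5" is rewritten to + // ">= 1.2, <= 1.4.5" before the individual comparators are parsed.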
+ c = rewriteRange(c) + + ors := strings.Split(c, "||") + or := make([][]*constraint, len(ors)) + for k, v := range ors { + + // TODO: Find a way to validate and fetch all the constraints in a simpler form + + // Validate the segment + if !validConstraintRegex.MatchString(v) { + return nil, fmt.Errorf("improper constraint: %s", v) + } + + cs := findConstraintRegex.FindAllString(v, -1) + if cs == nil { + cs = append(cs, v) + } + result := make([]*constraint, len(cs)) + for i, s := range cs { + pc, err := parseConstraint(s) + if err != nil { + return nil, err + } + + result[i] = pc + } + or[k] = result + } + + o := &Constraints{constraints: or} + return o, nil +} + +// Check tests if a version satisfies the constraints. +func (cs Constraints) Check(v *Version) bool { + // TODO(mattfarina): For v4 of this library consolidate the Check and Validate + // functions as the underlying functions make that possible now. + // loop over the ORs and check the inner ANDs + for _, o := range cs.constraints { + joy := true + for _, c := range o { + if check, _ := c.check(v); !check { + joy = false + break + } + } + + if joy { + return true + } + } + + return false +} + +// Validate checks if a version satisfies a constraint. If not a slice of +// reasons for the failure are returned in addition to a bool. +func (cs Constraints) Validate(v *Version) (bool, []error) { + // loop over the ORs and check the inner ANDs + var e []error + + // Capture the prerelease message only once. When it happens the first time + // this var is marked + var prerelease bool + for _, o := range cs.constraints { + joy := true + for _, c := range o { + // Before running the check handle the case where the version is + // a prerelease and the check is not searching for prereleases. + if c.con.pre == "" && v.pre != "" { + if !prerelease { + em := fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + e = append(e, em) + prerelease = true + } + joy = false + + } else { + + if _, err := c.check(v); err != nil { + e = append(e, err) + joy = false + } + } + } + + if joy { + return true, []error{} + } + } + + return false, e +} + +func (cs Constraints) String() string { + buf := make([]string, len(cs.constraints)) + var tmp bytes.Buffer + + for k, v := range cs.constraints { + tmp.Reset() + vlen := len(v) + for kk, c := range v { + tmp.WriteString(c.string()) + + // Space separate the AND conditions + if vlen > 1 && kk < vlen-1 { + tmp.WriteString(" ") + } + } + buf[k] = tmp.String() + } + + return strings.Join(buf, " || ") +} + +var constraintOps map[string]cfunc +var constraintRegex *regexp.Regexp +var constraintRangeRegex *regexp.Regexp + +// Used to find individual constraints within a multi-constraint string +var findConstraintRegex *regexp.Regexp + +// Used to validate that a segment of ANDs is valid +var validConstraintRegex *regexp.Regexp + +const cvRegex string = `v?([0-9|x|X|\*]+)(\.[0-9|x|X|\*]+)?(\.[0-9|x|X|\*]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +func init() { + constraintOps = map[string]cfunc{ + "": constraintTildeOrEqual, + "=": constraintTildeOrEqual, + "!=": constraintNotEqual, + ">": constraintGreaterThan, + "<": constraintLessThan, + ">=": constraintGreaterThanEqual, + "=>": constraintGreaterThanEqual, + "<=": constraintLessThanEqual, + "=<": constraintLessThanEqual, + "~": constraintTilde, + "~>": constraintTilde, + "^": constraintCaret, + } + + ops := `=||!=|>|<|>=|=>|<=|=<|~|~>|\^` + + constraintRegex =
regexp.MustCompile(fmt.Sprintf( + `^\s*(%s)\s*(%s)\s*$`, + ops, + cvRegex)) + + constraintRangeRegex = regexp.MustCompile(fmt.Sprintf( + `\s*(%s)\s+-\s+(%s)\s*`, + cvRegex, cvRegex)) + + findConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `(%s)\s*(%s)`, + ops, + cvRegex)) + + validConstraintRegex = regexp.MustCompile(fmt.Sprintf( + `^(\s*(%s)\s*(%s)\s*\,?)+$`, + ops, + cvRegex)) +} + +// An individual constraint +type constraint struct { + // The version used in the constraint check. For example, if a constraint + // is '<= 2.0.0' the con a version instance representing 2.0.0. + con *Version + + // The original parsed version (e.g., 4.x from != 4.x) + orig string + + // The original operator for the constraint + origfunc string + + // When an x is used as part of the version (e.g., 1.x) + minorDirty bool + dirty bool + patchDirty bool +} + +// Check if a version meets the constraint +func (c *constraint) check(v *Version) (bool, error) { + return constraintOps[c.origfunc](v, c) +} + +// String prints an individual constraint into a string +func (c *constraint) string() string { + return c.origfunc + c.orig +} + +type cfunc func(v *Version, c *constraint) (bool, error) + +func parseConstraint(c string) (*constraint, error) { + if len(c) > 0 { + m := constraintRegex.FindStringSubmatch(c) + if m == nil { + return nil, fmt.Errorf("improper constraint: %s", c) + } + + cs := &constraint{ + orig: m[2], + origfunc: m[1], + } + + ver := m[2] + minorDirty := false + patchDirty := false + dirty := false + if isX(m[3]) || m[3] == "" { + ver = "0.0.0" + dirty = true + } else if isX(strings.TrimPrefix(m[4], ".")) || m[4] == "" { + minorDirty = true + dirty = true + ver = fmt.Sprintf("%s.0.0%s", m[3], m[6]) + } else if isX(strings.TrimPrefix(m[5], ".")) || m[5] == "" { + dirty = true + patchDirty = true + ver = fmt.Sprintf("%s%s.0%s", m[3], m[4], m[6]) + } + + con, err := NewVersion(ver) + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs.con = con + cs.minorDirty = minorDirty + cs.patchDirty = patchDirty + cs.dirty = dirty + + return cs, nil + } + + // The rest is the special case where an empty string was passed in which + // is equivalent to * or >=0.0.0 + con, err := StrictNewVersion("0.0.0") + if err != nil { + + // The constraintRegex should catch any regex parsing errors. So, + // we should never get here. + return nil, errors.New("constraint Parser Error") + } + + cs := &constraint{ + con: con, + orig: c, + origfunc: "", + minorDirty: false, + patchDirty: false, + dirty: true, + } + return cs, nil +} + +// Constraint functions +func constraintNotEqual(v *Version, c *constraint) (bool, error) { + if c.dirty { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
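+ // For example, under these rules a version like "1.2.3-beta.1" does not + // satisfy "!= 4.x", because the constraint carries no prerelease component.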
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.con.Major() != v.Major() { + return true, nil + } + if c.con.Minor() != v.Minor() && !c.minorDirty { + return true, nil + } else if c.minorDirty { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } else if c.con.Patch() != v.Patch() && !c.patchDirty { + return true, nil + } else if c.patchDirty { + // Need to handle prereleases if present + if v.Prerelease() != "" || c.con.Prerelease() != "" { + eq := comparePrerelease(v.Prerelease(), c.con.Prerelease()) != 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + } + + eq := v.Equal(c.con) + if eq { + return false, fmt.Errorf("%s is equal to %s", v, c.orig) + } + + return true, nil +} + +func constraintGreaterThan(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return true, nil + } else if v.Major() < c.con.Major() { + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.minorDirty { + // This is a range case such as >11. When the version is something like + // 11.1.0 it is not > 11. For that we would need 12 or higher + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } else if c.patchDirty { + // This is for ranges such as >11.1. A version of 11.1.1 is not greater, + // while one such as 11.2.1 is + eq = v.Minor() > c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) + } + + // If we have gotten here we are not comparing pre-releases and can use the + // Compare function to accomplish that. + eq = v.Compare(c.con) == 1 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than or equal to %s", v, c.orig) +} + +func constraintLessThan(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) < 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than or equal to %s", v, c.orig) +} + +func constraintGreaterThanEqual(v *Version, c *constraint) (bool, error) { + + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details.
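+ // For example, "1.2.3-alpha" does not satisfy ">= 1.0.0", but it does + // satisfy ">= 1.0.0-0", where the constraint names a prerelease.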
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + eq := v.Compare(c.con) >= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is less than %s", v, c.orig) +} + +func constraintLessThanEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + var eq bool + + if !c.dirty { + eq = v.Compare(c.con) <= 0 + if eq { + return true, nil + } + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + if v.Major() > c.con.Major() { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } else if v.Major() == c.con.Major() && v.Minor() > c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s is greater than %s", v, c.orig) + } + + return true, nil +} + +// ~*, ~>* --> >= 0.0.0 (any) +// ~2, ~2.x, ~2.x.x, ~>2, ~>2.x ~>2.x.x --> >=2.0.0, <3.0.0 +// ~2.0, ~2.0.x, ~>2.0, ~>2.0.x --> >=2.0.0, <2.1.0 +// ~1.2, ~1.2.x, ~>1.2, ~>1.2.x --> >=1.2.0, <1.3.0 +// ~1.2.3, ~>1.2.3 --> >=1.2.3, <1.3.0 +// ~1.2.0, ~>1.2.0 --> >=1.2.0, <1.3.0 +func constraintTilde(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + // ~0.0.0 is a special case where all constraints are accepted. It's + // equivalent to >= 0.0.0. + if c.con.Major() == 0 && c.con.Minor() == 0 && c.con.Patch() == 0 && + !c.minorDirty && !c.patchDirty { + return true, nil + } + + if v.Major() != c.con.Major() { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + if v.Minor() != c.con.Minor() && !c.minorDirty { + return false, fmt.Errorf("%s does not have same major and minor version as %s", v, c.orig) + } + + return true, nil +} + +// When there is a .x (dirty) status it automatically opts in to ~. Otherwise +// it's a straight = +func constraintTildeOrEqual(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. 
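+ // For example, "1.2.3-rc.1" does not satisfy "= 1.2.3" even though the + // two differ only by a prerelease tag.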
+ if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + if c.dirty { + return constraintTilde(v, c) + } + + eq := v.Equal(c.con) + if eq { + return true, nil + } + + return false, fmt.Errorf("%s is not equal to %s", v, c.orig) +} + +// ^* --> (any) +// ^1.2.3 --> >=1.2.3 <2.0.0 +// ^1.2 --> >=1.2.0 <2.0.0 +// ^1 --> >=1.0.0 <2.0.0 +// ^0.2.3 --> >=0.2.3 <0.3.0 +// ^0.2 --> >=0.2.0 <0.3.0 +// ^0.0.3 --> >=0.0.3 <0.0.4 +// ^0.0 --> >=0.0.0 <0.1.0 +// ^0 --> >=0.0.0 <1.0.0 +func constraintCaret(v *Version, c *constraint) (bool, error) { + // If there is a pre-release on the version but the constraint isn't looking + // for them assume that pre-releases are not compatible. See issue 21 for + // more details. + if v.Prerelease() != "" && c.con.Prerelease() == "" { + return false, fmt.Errorf("%s is a prerelease version and the constraint is only looking for release versions", v) + } + + // This less than handles prereleases + if v.LessThan(c.con) { + return false, fmt.Errorf("%s is less than %s", v, c.orig) + } + + var eq bool + + // ^ when the major > 0 is >=x.y.z < x+1 + if c.con.Major() > 0 || c.minorDirty { + + // ^ has to be within a major range for > 0. Everything less than was + // filtered out with the LessThan call above. This filters out those + // that greater but not within the same major range. + eq = v.Major() == c.con.Major() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + + // ^ when the major is 0 and minor > 0 is >=0.y.z < 0.y+1 + if c.con.Major() == 0 && v.Major() > 0 { + return false, fmt.Errorf("%s does not have same major version as %s", v, c.orig) + } + // If the con Minor is > 0 it is not dirty + if c.con.Minor() > 0 || c.patchDirty { + eq = v.Minor() == c.con.Minor() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not have same minor version as %s. Expected minor versions to match when constraint major version is 0", v, c.orig) + } + + // At this point the major is 0 and the minor is 0 and not dirty. The patch + // is not dirty so we need to check if they are equal. If they are not equal + eq = c.con.Patch() == v.Patch() + if eq { + return true, nil + } + return false, fmt.Errorf("%s does not equal %s. Expect version and constraint to equal when major and minor versions are 0", v, c.orig) +} + +func isX(x string) bool { + switch x { + case "x", "*", "X": + return true + default: + return false + } +} + +func rewriteRange(i string) string { + m := constraintRangeRegex.FindAllStringSubmatch(i, -1) + if m == nil { + return i + } + o := i + for _, v := range m { + t := fmt.Sprintf(">= %s, <= %s", v[1], v[11]) + o = strings.Replace(o, v[0], t, 1) + } + + return o +} diff --git a/vendor/github.com/Masterminds/semver/v3/doc.go b/vendor/github.com/Masterminds/semver/v3/doc.go new file mode 100644 index 0000000000..391aa46b76 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/doc.go @@ -0,0 +1,184 @@ +/* +Package semver provides the ability to work with Semantic Versions (http://semver.org) in Go. + +Specifically it provides the ability to: + + * Parse semantic versions + * Sort semantic versions + * Check if a semantic version fits within a set of constraints + * Optionally work with a `v` prefix + +Parsing Semantic Versions + +There are two functions that can parse semantic versions. 
The `StrictNewVersion` +function only parses valid version 2 semantic versions as outlined in the +specification. The `NewVersion` function attempts to coerce a version into a +semantic version and parse it. For example, if there is a leading v or a version +listed without all 3 parts (e.g. 1.2) it will attempt to coerce it into a valid +semantic version (e.g., 1.2.0). In both cases a `Version` object is returned +that can be sorted, compared, and used in constraints. + +When parsing a version an error is returned if there is an issue +parsing the version. For example, + + v, err := semver.NewVersion("1.2.3-beta.1+b345") + +The version object has methods to get the parts of the version, compare it to +other versions, convert the version back into a string, and get the original +string. For more details please see the documentation +at https://godoc.org/github.com/Masterminds/semver. + +Sorting Semantic Versions + +A set of versions can be sorted using the `sort` package from the standard library. +For example, + + raw := []string{"1.2.3", "1.0", "1.3", "2", "0.4.2",} + vs := make([]*semver.Version, len(raw)) + for i, r := range raw { + v, err := semver.NewVersion(r) + if err != nil { + // Handle the error parsing the version. + } + + vs[i] = v + } + + sort.Sort(semver.Collection(vs)) + +Checking Version Constraints and Comparing Versions + +There are two methods for comparing versions. One uses comparison methods on +`Version` instances and the other uses Constraints. There are some important +differences to note between these two methods of comparison. + +1. When two versions are compared using functions such as `Compare`, `LessThan`, + and others it will follow the specification and always include prereleases + within the comparison. It will provide an answer consistent with the comparison + spec section at https://semver.org/#spec-item-11 +2. When constraint checking is used for checks or validation it will follow a + different set of rules that are common for ranges with tools like npm/js + and Rust/Cargo. This includes considering prereleases to be invalid if the + range does not include one. If you want to have it include pre-releases a + simple solution is to include `-0` in your range. +3. Constraint ranges can have some complex rules including the shorthand use of + ~ and ^. For more details on those see the options below. + +There are differences between the two methods of checking versions because the +comparison methods on `Version` follow the specification while comparison ranges +are not part of the specification. Different packages and tools have taken it +upon themselves to come up with range rules. This has resulted in differences. +For example, npm/js and Cargo/Rust follow similar patterns while PHP has a +different pattern for ^. The comparison features in this package follow the +npm/js and Cargo/Rust lead because applications using it have followed similar +patterns with their versions. + +Checking a version against version constraints is one of the most featureful +parts of the package. + + c, err := semver.NewConstraint(">= 1.2.3") + if err != nil { + // Handle constraint not being parsable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parsable. + } + // Check if the version meets the constraints. The a variable will be true. + a := c.Check(v) + +Basic Comparisons + +There are two elements to the comparisons. First, a comparison string is a list +of comma or space separated AND comparisons.
These are then separated by || (OR) +comparisons. For example, `">= 1.2 < 3.0.0 || >= 4.2.3"` is looking for a +comparison that's greater than or equal to 1.2 and less than 3.0.0 or is +greater than or equal to 4.2.3. This can also be written as +`">= 1.2, < 3.0.0 || >= 4.2.3"` + +The basic comparisons are: + + * `=`: equal (aliased to no operator) + * `!=`: not equal + * `>`: greater than + * `<`: less than + * `>=`: greater than or equal to + * `<=`: less than or equal to + +Hyphen Range Comparisons + +There are multiple methods to handle ranges and the first is hyphen ranges. +These look like: + + * `1.2 - 1.4.5` which is equivalent to `>= 1.2, <= 1.4.5` + * `2.3.4 - 4.5` which is equivalent to `>= 2.3.4 <= 4.5` + +Wildcards In Comparisons + +The `x`, `X`, and `*` characters can be used as wildcard characters. This works +for all comparison operators. When used on the `=` operator it falls +back to the tilde operation. For example, + + * `1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + * `>= 1.2.x` is equivalent to `>= 1.2.0` + * `<= 2.x` is equivalent to `< 3` + * `*` is equivalent to `>= 0.0.0` + +Tilde Range Comparisons (Patch) + +The tilde (`~`) comparison operator is for patch level ranges when a minor +version is specified and major level changes when the minor number is missing. +For example, + + * `~1.2.3` is equivalent to `>= 1.2.3 < 1.3.0` + * `~1` is equivalent to `>= 1, < 2` + * `~2.3` is equivalent to `>= 2.3 < 2.4` + * `~1.2.x` is equivalent to `>= 1.2.0 < 1.3.0` + * `~1.x` is equivalent to `>= 1 < 2` + +Caret Range Comparisons (Major) + +The caret (`^`) comparison operator is for major level changes once a stable +(1.0.0) release has occurred. Prior to a 1.0.0 release the minor version acts +as the API stability level. This is useful when comparing API versions, as a +major change is API breaking. For example, + + * `^1.2.3` is equivalent to `>= 1.2.3, < 2.0.0` + * `^1.2.x` is equivalent to `>= 1.2.0, < 2.0.0` + * `^2.3` is equivalent to `>= 2.3, < 3` + * `^2.x` is equivalent to `>= 2.0.0, < 3` + * `^0.2.3` is equivalent to `>=0.2.3 <0.3.0` + * `^0.2` is equivalent to `>=0.2.0 <0.3.0` + * `^0.0.3` is equivalent to `>=0.0.3 <0.0.4` + * `^0.0` is equivalent to `>=0.0.0 <0.1.0` + * `^0` is equivalent to `>=0.0.0 <1.0.0` + +Validation + +In addition to testing a version against a constraint, a version can be validated +against a constraint. When validation fails a slice of errors containing why a +version didn't meet the constraint is returned. For example, + + c, err := semver.NewConstraint("<= 1.2.3, >= 1.4") + if err != nil { + // Handle constraint not being parseable. + } + + v, err := semver.NewVersion("1.3") + if err != nil { + // Handle version not being parseable. + } + + // Validate a version against a constraint.
a, msgs := c.Validate(v) + // a is false + for _, m := range msgs { + fmt.Println(m) + + // Loops over the errors which would read + // "1.3 is greater than 1.2.3" + // "1.3 is less than 1.4" + } +*/ +package semver diff --git a/vendor/github.com/Masterminds/semver/v3/fuzz.go b/vendor/github.com/Masterminds/semver/v3/fuzz.go new file mode 100644 index 0000000000..a242ad7058 --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/fuzz.go @@ -0,0 +1,22 @@ +// +build gofuzz + +package semver + +func Fuzz(data []byte) int { + d := string(data) + + // Test NewVersion + _, _ = NewVersion(d) + + // Test StrictNewVersion + _, _ = StrictNewVersion(d) + + // Test NewConstraint + _, _ = NewConstraint(d) + + // The return value should be 0 normally, 1 if the priority in future tests + // should be increased, and -1 if future tests should skip passing in that + // data. We do not have a reason to change priority so 0 is always returned. + // There are example tests that do this. + return 0 +} diff --git a/vendor/github.com/Masterminds/semver/v3/version.go b/vendor/github.com/Masterminds/semver/v3/version.go new file mode 100644 index 0000000000..d6b9cda3ee --- /dev/null +++ b/vendor/github.com/Masterminds/semver/v3/version.go @@ -0,0 +1,606 @@ +package semver + +import ( + "bytes" + "database/sql/driver" + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "strings" +) + +// The compiled version of the regex created at init() is cached here so it +// only needs to be created once. +var versionRegex *regexp.Regexp + +var ( + // ErrInvalidSemVer is returned when a version is found to be invalid + // during parsing. + ErrInvalidSemVer = errors.New("Invalid Semantic Version") + + // ErrEmptyString is returned when an empty string is passed in for parsing. + ErrEmptyString = errors.New("Version string empty") + + // ErrInvalidCharacters is returned when invalid characters are found as + // part of a version + ErrInvalidCharacters = errors.New("Invalid characters in version") + + // ErrSegmentStartsZero is returned when a version segment starts with 0. + // This is invalid in SemVer. + ErrSegmentStartsZero = errors.New("Version segment starts with 0") + + // ErrInvalidMetadata is returned when the metadata is an invalid format + ErrInvalidMetadata = errors.New("Invalid Metadata string") + + // ErrInvalidPrerelease is returned when the pre-release is an invalid format + ErrInvalidPrerelease = errors.New("Invalid Prerelease string") +) + +// semVerRegex is the regular expression used to parse a semantic version. +const semVerRegex string = `v?([0-9]+)(\.[0-9]+)?(\.[0-9]+)?` + + `(-([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + + `(\+([0-9A-Za-z\-]+(\.[0-9A-Za-z\-]+)*))?` + +// Version represents a single semantic version. +type Version struct { + major, minor, patch uint64 + pre string + metadata string + original string +} + +func init() { + versionRegex = regexp.MustCompile("^" + semVerRegex + "$") +} + +const num string = "0123456789" +const allowed string = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-" + num + +// StrictNewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. Only parses valid semantic versions. +// Performs checking that can find errors within the version. +// If you want to coerce a version, such as 1 or 1.2, and parse it the way the +// 1.x releases of semver did, use the NewVersion() function.
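+// +// A brief usage sketch (the version string is illustrative): +// +//	v, err := StrictNewVersion("1.2.3-beta.1+b345") +//	if err != nil { +//		// Handle the version failing strict parsing. +//	}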
+func StrictNewVersion(v string) (*Version, error) { + // Parsing here does not use RegEx in order to increase performance and reduce + // allocations. + + if len(v) == 0 { + return nil, ErrEmptyString + } + + // Split the parts into [0]major, [1]minor, and [2]patch,prerelease,build + parts := strings.SplitN(v, ".", 3) + if len(parts) != 3 { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + original: v, + } + + // check for prerelease or build metadata + var extra []string + if strings.ContainsAny(parts[2], "-+") { + // Start with the build metadata first as it needs to be on the right + extra = strings.SplitN(parts[2], "+", 2) + if len(extra) > 1 { + // build metadata found + sv.metadata = extra[1] + parts[2] = extra[0] + } + + extra = strings.SplitN(parts[2], "-", 2) + if len(extra) > 1 { + // prerelease found + sv.pre = extra[1] + parts[2] = extra[0] + } + } + + // Validate the number segments are valid. This includes only having positive + // numbers and no leading 0's. + for _, p := range parts { + if !containsOnly(p, num) { + return nil, ErrInvalidCharacters + } + + if len(p) > 1 && p[0] == '0' { + return nil, ErrSegmentStartsZero + } + } + + // Extract the major, minor, and patch elements onto the returned Version + var err error + sv.major, err = strconv.ParseUint(parts[0], 10, 64) + if err != nil { + return nil, err + } + + sv.minor, err = strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return nil, err + } + + sv.patch, err = strconv.ParseUint(parts[2], 10, 64) + if err != nil { + return nil, err + } + + // No prerelease or build metadata found so returning now as a fastpath. + if sv.pre == "" && sv.metadata == "" { + return sv, nil + } + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// NewVersion parses a given version and returns an instance of Version or +// an error if unable to parse the version. If the version is SemVer-ish it +// attempts to convert it to SemVer. If you want to validate it was a strict +// semantic version at parse time see StrictNewVersion(). +func NewVersion(v string) (*Version, error) { + m := versionRegex.FindStringSubmatch(v) + if m == nil { + return nil, ErrInvalidSemVer + } + + sv := &Version{ + metadata: m[8], + pre: m[5], + original: v, + } + + var err error + sv.major, err = strconv.ParseUint(m[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + + if m[2] != "" { + sv.minor, err = strconv.ParseUint(strings.TrimPrefix(m[2], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.minor = 0 + } + + if m[3] != "" { + sv.patch, err = strconv.ParseUint(strings.TrimPrefix(m[3], "."), 10, 64) + if err != nil { + return nil, fmt.Errorf("Error parsing version segment: %s", err) + } + } else { + sv.patch = 0 + } + + // Perform some basic due diligence on the extra parts to ensure they are + // valid. + + if sv.pre != "" { + if err = validatePrerelease(sv.pre); err != nil { + return nil, err + } + } + + if sv.metadata != "" { + if err = validateMetadata(sv.metadata); err != nil { + return nil, err + } + } + + return sv, nil +} + +// MustParse parses a given version and panics on error. +func MustParse(v string) *Version { + sv, err := NewVersion(v) + if err != nil { + panic(err) + } + return sv +} + +// String converts a Version object to a string. 
+// Note, if the original version contained a leading v this version will not. +// See the Original() method to retrieve the original value. Semantic Versions +// don't contain a leading v per the spec. Instead it's optional on +// implementation. +func (v Version) String() string { + var buf bytes.Buffer + + fmt.Fprintf(&buf, "%d.%d.%d", v.major, v.minor, v.patch) + if v.pre != "" { + fmt.Fprintf(&buf, "-%s", v.pre) + } + if v.metadata != "" { + fmt.Fprintf(&buf, "+%s", v.metadata) + } + + return buf.String() +} + +// Original returns the original value passed in to be parsed. +func (v *Version) Original() string { + return v.original +} + +// Major returns the major version. +func (v Version) Major() uint64 { + return v.major +} + +// Minor returns the minor version. +func (v Version) Minor() uint64 { + return v.minor +} + +// Patch returns the patch version. +func (v Version) Patch() uint64 { + return v.patch +} + +// Prerelease returns the pre-release version. +func (v Version) Prerelease() string { + return v.pre +} + +// Metadata returns the metadata on the version. +func (v Version) Metadata() string { + return v.metadata +} + +// originalVPrefix returns the original 'v' prefix if any. +func (v Version) originalVPrefix() string { + + // Note, only lowercase v is supported as a prefix by the parser. + if v.original != "" && v.original[:1] == "v" { + return v.original[:1] + } + return "" +} + +// IncPatch produces the next patch version. +// If the current version does not have prerelease/metadata information, +// it unsets metadata and prerelease values, increments patch number. +// If the current version has any of prerelease or metadata information, +// it unsets both values and keeps current patch value +func (v Version) IncPatch() Version { + vNext := v + // according to http://semver.org/#spec-item-9 + // Pre-release versions have a lower precedence than the associated normal version. + // according to http://semver.org/#spec-item-10 + // Build metadata SHOULD be ignored when determining version precedence. + if v.pre != "" { + vNext.metadata = "" + vNext.pre = "" + } else { + vNext.metadata = "" + vNext.pre = "" + vNext.patch = v.patch + 1 + } + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMinor produces the next minor version. +// Sets patch to 0. +// Increments minor number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMinor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = v.minor + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// IncMajor produces the next major version. +// Sets patch to 0. +// Sets minor to 0. +// Increments major number. +// Unsets metadata. +// Unsets prerelease status. +func (v Version) IncMajor() Version { + vNext := v + vNext.metadata = "" + vNext.pre = "" + vNext.patch = 0 + vNext.minor = 0 + vNext.major = v.major + 1 + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext +} + +// SetPrerelease defines the prerelease value. +// Value must not include the required 'hyphen' prefix. +func (v Version) SetPrerelease(prerelease string) (Version, error) { + vNext := v + if len(prerelease) > 0 { + if err := validatePrerelease(prerelease); err != nil { + return vNext, err + } + } + vNext.pre = prerelease + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// SetMetadata defines metadata value. +// Value must not include the required 'plus' prefix. 
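+// +// For example, a usage sketch (the metadata value is illustrative): +// +//	v2, err := v.SetMetadata("build.11")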
+func (v Version) SetMetadata(metadata string) (Version, error) { + vNext := v + if len(metadata) > 0 { + if err := validateMetadata(metadata); err != nil { + return vNext, err + } + } + vNext.metadata = metadata + vNext.original = v.originalVPrefix() + "" + vNext.String() + return vNext, nil +} + +// LessThan tests if one version is less than another one. +func (v *Version) LessThan(o *Version) bool { + return v.Compare(o) < 0 +} + +// GreaterThan tests if one version is greater than another one. +func (v *Version) GreaterThan(o *Version) bool { + return v.Compare(o) > 0 +} + +// Equal tests if two versions are equal to each other. +// Note, versions can be equal with different metadata since metadata +// is not considered part of the comparable version. +func (v *Version) Equal(o *Version) bool { + return v.Compare(o) == 0 +} + +// Compare compares this version to another one. It returns -1, 0, or 1 if +// the version is smaller, equal, or larger than the other version. +// +// Versions are compared by X.Y.Z. Build metadata is ignored. Prerelease is +// lower than the version without a prerelease. Compare always takes into account +// prereleases. If you want to work with ranges using typical range syntaxes that +// skip prereleases if the range is not looking for them use constraints. +func (v *Version) Compare(o *Version) int { + // Compare the major, minor, and patch version for differences. If a + // difference is found return the comparison. + if d := compareSegment(v.Major(), o.Major()); d != 0 { + return d + } + if d := compareSegment(v.Minor(), o.Minor()); d != 0 { + return d + } + if d := compareSegment(v.Patch(), o.Patch()); d != 0 { + return d + } + + // At this point the major, minor, and patch versions are the same. + ps := v.pre + po := o.Prerelease() + + if ps == "" && po == "" { + return 0 + } + if ps == "" { + return 1 + } + if po == "" { + return -1 + } + + return comparePrerelease(ps, po) +} + +// UnmarshalJSON implements JSON.Unmarshaler interface. +func (v *Version) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// MarshalJSON implements JSON.Marshaler interface. +func (v Version) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// Scan implements the SQL.Scanner interface. +func (v *Version) Scan(value interface{}) error { + var s string + s, _ = value.(string) + temp, err := NewVersion(s) + if err != nil { + return err + } + v.major = temp.major + v.minor = temp.minor + v.patch = temp.patch + v.pre = temp.pre + v.metadata = temp.metadata + v.original = temp.original + return nil +} + +// Value implements the Driver.Valuer interface. +func (v Version) Value() (driver.Value, error) { + return v.String(), nil +} + +func compareSegment(v, o uint64) int { + if v < o { + return -1 + } + if v > o { + return 1 + } + + return 0 +} + +func comparePrerelease(v, o string) int { + + // split the prerelease versions by their parts. The separator, per the spec, + // is a . + sparts := strings.Split(v, ".") + oparts := strings.Split(o, ".") + + // Find the longer length of the parts to know how many loop iterations to + // go through.
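+ // For example, comparing "alpha.1" with "alpha.1.2" iterates three times; + // the missing third part on the shorter side is treated as empty.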
+ slen := len(sparts) + olen := len(oparts) + + l := slen + if olen > slen { + l = olen + } + + // Iterate over each part of the prereleases to compare the differences. + for i := 0; i < l; i++ { + // Since the length of the parts can be different we need to create + // a placeholder. This is to avoid out of bounds issues. + stemp := "" + if i < slen { + stemp = sparts[i] + } + + otemp := "" + if i < olen { + otemp = oparts[i] + } + + d := comparePrePart(stemp, otemp) + if d != 0 { + return d + } + } + + // Reaching here means two versions are of equal value but have different + // metadata (the part following a +). They are not identical in string form + // but the version comparison finds them to be equal. + return 0 +} + +func comparePrePart(s, o string) int { + // Fastpath if they are equal + if s == o { + return 0 + } + + // When s or o are empty we can use the other in an attempt to determine + // the response. + if s == "" { + if o != "" { + return -1 + } + return 1 + } + + if o == "" { + if s != "" { + return 1 + } + return -1 + } + + // When comparing strings "99" is greater than "103". To handle + // cases like this we need to detect numbers and compare them. According + // to the semver spec, numbers are always positive. If there is a - at the + // start like -99 this is to be evaluated as an alphanum. numbers always + // have lower precedence than alphanum. Parsing as Uints because negative numbers + // are ignored. + + oi, n1 := strconv.ParseUint(o, 10, 64) + si, n2 := strconv.ParseUint(s, 10, 64) + + // In the case where both are strings, compare the strings + if n1 != nil && n2 != nil { + if s > o { + return 1 + } + return -1 + } else if n1 != nil { + // o is a string and s is a number + return -1 + } else if n2 != nil { + // s is a string and o is a number + return 1 + } + // Both are numbers + if si > oi { + return 1 + } + return -1 + +} + +// Like strings.ContainsAny but does an only instead of any. +func containsOnly(s string, comp string) bool { + return strings.IndexFunc(s, func(r rune) bool { + return !strings.ContainsRune(comp, r) + }) == -1 +} + +// From the spec, "Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty. +// Numeric identifiers MUST NOT include leading zeroes.". These segments can +// be dot separated. +func validatePrerelease(p string) error { + eparts := strings.Split(p, ".") + for _, p := range eparts { + if containsOnly(p, num) { + if len(p) > 1 && p[0] == '0' { + return ErrSegmentStartsZero + } + } else if !containsOnly(p, allowed) { + return ErrInvalidPrerelease + } + } + + return nil +} + +// From the spec, "Build metadata MAY be denoted by +// appending a plus sign and a series of dot separated identifiers immediately +// following the patch or pre-release version. Identifiers MUST comprise only +// ASCII alphanumerics and hyphen [0-9A-Za-z-]. Identifiers MUST NOT be empty."
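+// For example, "build.11.e0f985a" is accepted while "build/11" is +// rejected, since '/' is outside the allowed character set.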
+func validateMetadata(m string) error { + eparts := strings.Split(m, ".") + for _, p := range eparts { + if !containsOnly(p, allowed) { + return ErrInvalidMetadata + } + } + return nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/.gitignore b/vendor/github.com/Masterminds/sprig/v3/.gitignore new file mode 100644 index 0000000000..5e3002f88f --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/.gitignore @@ -0,0 +1,2 @@ +vendor/ +/.glide diff --git a/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md new file mode 100644 index 0000000000..fcdd4e88ae --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/CHANGELOG.md @@ -0,0 +1,370 @@ +# Changelog + +## Release 3.2.1 (2021-02-04) + +### Changed + +- Upgraded `Masterminds/goutils` to `v1.1.1`. see the [Security Advisory](https://github.com/Masterminds/goutils/security/advisories/GHSA-xg2h-wx96-xgxr) + +## Release 3.2.0 (2020-12-14) + +### Added + +- #211: Added randInt function (thanks @kochurovro) +- #223: Added fromJson and mustFromJson functions (thanks @mholt) +- #242: Added a bcrypt function (thanks @robbiet480) +- #253: Added randBytes function (thanks @MikaelSmith) +- #254: Added dig function for dicts (thanks @nyarly) +- #257: Added regexQuoteMeta for quoting regex metadata (thanks @rheaton) +- #261: Added filepath functions osBase, osDir, osExt, osClean, osIsAbs (thanks @zugl) +- #268: Added and and all functions for testing conditions (thanks @phuslu) +- #181: Added float64 arithmetic addf, add1f, subf, divf, mulf, maxf, and minf + (thanks @andrewmostello) +- #265: Added chunk function to split array into smaller arrays (thanks @karelbilek) +- #270: Extend certificate functions to handle non-RSA keys + add support for + ed25519 keys (thanks @misberner) + +### Changed + +- Removed testing and support for Go 1.12. ed25519 support requires Go 1.13 or newer +- Using semver 3.1.1 and mergo 0.3.11 + +### Fixed + +- #249: Fix htmlDateInZone example (thanks @spawnia) + +NOTE: The dependency github.com/imdario/mergo reverted the breaking change in +0.3.9 via 0.3.10 release. + +## Release 3.1.0 (2020-04-16) + +NOTE: The dependency github.com/imdario/mergo made a behavior change in 0.3.9 +that impacts sprig functionality. Do not use sprig with a version newer than 0.3.8. + +### Added + +- #225: Added support for generating htpasswd hash (thanks @rustycl0ck) +- #224: Added duration filter (thanks @frebib) +- #205: Added `seq` function (thanks @thadc23) + +### Changed + +- #203: Unlambda functions with correct signature (thanks @muesli) +- #236: Updated the license formatting for GitHub display purposes +- #238: Updated package dependency versions. Note, mergo not updated to 0.3.9 + as it causes a breaking change for sprig. 
That issue is tracked at + https://github.com/imdario/mergo/issues/139 + +### Fixed + +- #229: Fix `seq` example in docs (thanks @kalmant) + +## Release 3.0.2 (2019-12-13) + +### Fixed + +- #220: Updating to semver v3.0.3 to fix issue with <= ranges +- #218: fix typo elyptical->elliptic in ecdsa key description (thanks @laverya) + +## Release 3.0.1 (2019-12-08) + +### Fixed + +- #212: Updated semver fixing broken constraint checking with ^0.0 + +## Release 3.0.0 (2019-10-02) + +### Added + +- #187: Added durationRound function (thanks @yjp20) +- #189: Added numerous template functions that return errors rather than panic (thanks @nrvnrvn) +- #193: Added toRawJson support (thanks @Dean-Coakley) +- #197: Added get support to dicts (thanks @Dean-Coakley) + +### Changed + +- #186: Moving dependency management to Go modules +- #186: Updated semver to v3. This has changes in the way ^ is handled +- #194: Updated documentation on merging and how it copies. Added example using deepCopy +- #196: trunc now supports negative values (thanks @Dean-Coakley) + +## Release 2.22.0 (2019-10-02) + +### Added + +- #173: Added getHostByName function to resolve dns names to ips (thanks @fcgravalos) +- #195: Added deepCopy function for use with dicts + +### Changed + +- Updated merge and mergeOverwrite documentation to explain copying and how to + use deepCopy with it + +## Release 2.21.0 (2019-09-18) + +### Added + +- #122: Added encryptAES/decryptAES functions (thanks @n0madic) +- #128: Added toDecimal support (thanks @Dean-Coakley) +- #169: Added list contcat (thanks @astorath) +- #174: Added deepEqual function (thanks @bonifaido) +- #170: Added url parse and join functions (thanks @astorath) + +### Changed + +- #171: Updated glide config for Google UUID to v1 and to add ranges to semver and testify + +### Fixed + +- #172: Fix semver wildcard example (thanks @piepmatz) +- #175: Fix dateInZone doc example (thanks @s3than) + +## Release 2.20.0 (2019-06-18) + +### Added + +- #164: Adding function to get unix epoch for a time (@mattfarina) +- #166: Adding tests for date_in_zone (@mattfarina) + +### Changed + +- #144: Fix function comments based on best practices from Effective Go (@CodeLingoTeam) +- #150: Handles pointer type for time.Time in "htmlDate" (@mapreal19) +- #161, #157, #160, #153, #158, #156, #155, #159, #152 documentation updates (@badeadan) + +### Fixed + +## Release 2.19.0 (2019-03-02) + +IMPORTANT: This release reverts a change from 2.18.0 + +In the previous release (2.18), we prematurely merged a partial change to the crypto functions that led to creating two sets of crypto functions (I blame @technosophos -- since that's me). This release rolls back that change, and does what was originally intended: It alters the existing crypto functions to use secure random. + +We debated whether this classifies as a change worthy of major revision, but given the proximity to the last release, we have decided that treating 2.18 as a faulty release is the correct course of action. We apologize for any inconvenience. 
+
+### Changed
+
+- Fix substr panic 35fb796 (Alexey igrychev)
+- Remove extra period 1eb7729 (Matthew Lorimor)
+- Make random string functions use crypto by default 6ceff26 (Matthew Lorimor)
+- README edits/fixes/suggestions 08fe136 (Lauri Apple)
+
+
+## Release 2.18.0 (2019-02-12)
+
+### Added
+
+- Added mergeOverwrite function
+- cryptographic functions that use secure random (see fe1de12)
+
+### Changed
+
+- Improve documentation of regexMatch function, resolves #139 90b89ce (Jan Tagscherer)
+- Handle has for nil list 9c10885 (Daniel Cohen)
+- Document behaviour of mergeOverwrite fe0dbe9 (Lukas Rieder)
+- doc: adds missing documentation. 4b871e6 (Fernandez Ludovic)
+- Replace outdated goutils imports 01893d2 (Matthew Lorimor)
+- Surface crypto secure random strings from goutils fe1de12 (Matthew Lorimor)
+- Handle untyped nil values as parameters to string functions 2b2ec8f (Morten Torkildsen)
+
+### Fixed
+
+- Fix dict merge issue and provide mergeOverwrite .dst .src1 to overwrite from src -> dst 4c59c12 (Lukas Rieder)
+- Fix substr var names and comments d581f80 (Dean Coakley)
+- Fix substr documentation 2737203 (Dean Coakley)
+
+## Release 2.17.1 (2019-01-03)
+
+### Fixed
+
+The 2.17.0 release did not have a version pinned for xstrings, which caused
+compilation failures when xstrings < 1.2 was used. This adds the correct
+version string to glide.yaml.
+
+## Release 2.17.0 (2019-01-03)
+
+### Added
+
+- adds adler32sum function and test 6908fc2 (marshallford)
+- Added kebabcase function ca331a1 (Ilyes512)
+
+### Changed
+
+- Update goutils to 1.1.0 4e1125d (Matt Butcher)
+
+### Fixed
+
+- Fix 'has' documentation e3f2a85 (dean-coakley)
+- docs(dict): fix typo in pick example dc424f9 (Dustin Specker)
+- fixes spelling errors... not sure how that happened 4cf188a (marshallford)
+
+## Release 2.16.0 (2018-08-13)
+
+### Added
+
+- add splitn function fccb0b0 (Helgi Þorbjörnsson)
+- Add slice func df28ca7 (gongdo)
+- Generate serial number a3bdffd (Cody Coons)
+- Extract values of dict with values function df39312 (Lawrence Jones)
+
+### Changed
+
+- Modify panic message for list.slice ae38335 (gongdo)
+- Minor improvement in code quality - Removed an unreachable piece of code at defaults.go#L26:6 - Resolve formatting issues. 5834241 (Abhishek Kashyap)
+- Remove duplicated documentation 1d97af1 (Matthew Fisher)
+- Test on go 1.11 49df809 (Helgi Þormar Þorbjörnsson)
+
+### Fixed
+
+- Fix file permissions c5f40b5 (gongdo)
+- Fix example for buildCustomCert 7779e0d (Tin Lam)
+
+## Release 2.15.0 (2018-04-02)
+
+### Added
+
+- #68 and #69: Add json helpers to docs (thanks @arunvelsriram)
+- #66: Add ternary function (thanks @binoculars)
+- #67: Allow keys function to take multiple dicts (thanks @binoculars)
+- #89: Added sha1sum to crypto function (thanks @benkeil)
+- #81: Allow customizing Root CA that is used by genSignedCert (thanks @chenzhiwei)
+- #92: Add travis testing for go 1.10
+- #93: Adding appveyor config for windows testing
+
+### Changed
+
+- #90: Updating to more recent dependencies
+- #73: replace satori/go.uuid with google/uuid (thanks @petterw)
+
+### Fixed
+
+- #76: Fixed documentation typos (thanks @Thiht)
+- Fixed rounding issue on the `ago` function. Note, this removes support for Go 1.8 and older
+
+## Release 2.14.1 (2017-12-01)
+
+### Fixed
+
+- #60: Fix typo in function name documentation (thanks @neil-ca-moore)
+- #61: Removing line with {{ due to blocking github pages generation
+- #64: Update the list functions to handle int, string, and other slices for compatibility
+
+## Release 2.14.0 (2017-10-06)
+
+This new version of Sprig adds a set of functions for generating and working with SSL certificates.
+
+- `genCA` generates an SSL Certificate Authority
+- `genSelfSignedCert` generates an SSL self-signed certificate
+- `genSignedCert` generates an SSL certificate and key based on a given CA
+
+## Release 2.13.0 (2017-09-18)
+
+This release adds new functions, including:
+
+- `regexMatch`, `regexFindAll`, `regexFind`, `regexReplaceAll`, `regexReplaceAllLiteral`, and `regexSplit` to work with regular expressions
+- `floor`, `ceil`, and `round` math functions
+- `toDate` converts a string to a date
+- `nindent` is just like `indent` but also prepends a new line
+- `ago` returns the time elapsed since `time.Now`
+
+### Added
+
+- #40: Added basic regex functionality (thanks @alanquillin)
+- #41: Added ceil floor and round functions (thanks @alanquillin)
+- #48: Added toDate function (thanks @andreynering)
+- #50: Added nindent function (thanks @binoculars)
+- #46: Added ago function (thanks @slayer)
+
+### Changed
+
+- #51: Updated godocs to include new string functions (thanks @curtisallen)
+- #49: Added ability to merge multiple dicts (thanks @binoculars)
+
+## Release 2.12.0 (2017-05-17)
+
+- `snakecase`, `camelcase`, and `shuffle` are three new string functions
+- `fail` allows you to bail out of a template render when conditions are not met
+
+## Release 2.11.0 (2017-05-02)
+
+- Added `toJson` and `toPrettyJson`
+- Added `merge`
+- Refactored documentation
+
+## Release 2.10.0 (2017-03-15)
+
+- Added `semver` and `semverCompare` for Semantic Versions
+- `list` replaces `tuple`
+- Fixed issue with `join`
+- Added `first`, `last`, `initial`, `rest`, `prepend`, `append`, `toString`, `toStrings`, `sortAlpha`, `reverse`, `coalesce`, `pluck`, `pick`, `compact`, `keys`, `omit`, `uniq`, `has`, `without`
+
+## Release 2.9.0 (2017-02-23)
+
+- Added `splitList` to split a list
+- Added crypto functions of `genPrivateKey` and `derivePassword`
+
+## Release 2.8.0 (2016-12-21)
+
+- Added access to several path functions (`base`, `dir`, `clean`, `ext`, and `abs`)
+- Added functions for _mutating_ dictionaries (`set`, `unset`, `hasKey`)
+
+## Release 2.7.0 (2016-12-01)
+
+- Added `sha256sum` to generate a hash of an input
+- Added functions to convert a numeric or string to `int`, `int64`, `float64`
+
+## Release 2.6.0 (2016-10-03)
+
+- Added a `uuidv4` template function for generating UUIDs inside of a template.
+
+## Release 2.5.0 (2016-08-19)
+
+- New `trimSuffix`, `trimPrefix`, `hasSuffix`, and `hasPrefix` functions
+- New aliases have been added for a few functions that didn't follow the naming conventions (`trimAll` and `abbrevBoth`)
+- `trimall` and `abbrevboth` (notice the case) are deprecated and will be removed in 3.0.0
+
+## Release 2.4.0 (2016-08-16)
+
+- Adds two functions: `until` and `untilStep`
+
+## Release 2.3.0 (2016-06-21)
+
+- cat: Concatenate strings with whitespace separators.
+- replace: Replace parts of a string: `replace " " "-" "Me First"` renders "Me-First"
+- plural: Format plurals: `len "foo" | plural "one foo" "many foos"` renders "many foos"
+- indent: Indent blocks of text in a way that is sensitive to "\n" characters.
+
+## Release 2.2.0 (2016-04-21)
+
+- Added a `genPrivateKey` function (Thanks @bacongobbler)
+
+## Release 2.1.0 (2016-03-30)
+
+- `default` now prints the default value when it does not receive a value down
+  the pipeline. It is much safer now to do `{{.Foo | default "bar"}}`.
+- Added accessors for "hermetic" functions. These return only functions that,
+  when given the same input, produce the same output.
+
+## Release 2.0.0 (2016-03-29)
+
+Because we switched from `int` to `int64` as the return value for all integer
+math functions, the library's major version number has been incremented.
+
+- `min` complements `max` (formerly `biggest`)
+- `empty` indicates that a value is the empty value for its type
+- `tuple` creates a tuple inside of a template: `{{$t := tuple "a" "b" "c"}}`
+- `dict` creates a dictionary inside of a template `{{$d := dict "key1" "val1" "key2" "val2"}}`
+- Date formatters have been added for HTML dates (as used in `date` input fields)
+- Integer math functions can convert from a number of types, including `string` (via `strconv.ParseInt`).
+
+## Release 1.2.0 (2016-02-01)
+
+- Added quote and squote
+- Added b32enc and b32dec
+- add now takes varargs
+- biggest now takes varargs
+
+## Release 1.1.0 (2015-12-29)
+
+- Added #4: Added contains function. strings.Contains, but with the arguments
+  switched to simplify common pipelines. (thanks krancour)
+- Added Travis-CI testing support
+
+## Release 1.0.0 (2015-12-23)
+
+- Initial release
diff --git a/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt
new file mode 100644
index 0000000000..f311b1eaaa
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/LICENSE.txt
@@ -0,0 +1,19 @@
+Copyright (C) 2013-2020 Masterminds
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/Masterminds/sprig/v3/Makefile b/vendor/github.com/Masterminds/sprig/v3/Makefile
new file mode 100644
index 0000000000..78d409cde2
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/Makefile
@@ -0,0 +1,9 @@
+.PHONY: test
+test:
+	@echo "==> Running tests"
+	GO111MODULE=on go test -v
+
+.PHONY: test-cover
+test-cover:
+	@echo "==> Running Tests with coverage"
+	GO111MODULE=on go test -cover .
diff --git a/vendor/github.com/Masterminds/sprig/v3/README.md b/vendor/github.com/Masterminds/sprig/v3/README.md
new file mode 100644
index 0000000000..c37ba01c21
--- /dev/null
+++ b/vendor/github.com/Masterminds/sprig/v3/README.md
@@ -0,0 +1,101 @@
+# Sprig: Template functions for Go templates
+
+[![GoDoc](https://img.shields.io/static/v1?label=godoc&message=reference&color=blue)](https://pkg.go.dev/github.com/Masterminds/sprig/v3)
+[![Go Report Card](https://goreportcard.com/badge/github.com/Masterminds/sprig)](https://goreportcard.com/report/github.com/Masterminds/sprig)
+[![Stability: Sustained](https://masterminds.github.io/stability/sustained.svg)](https://masterminds.github.io/stability/sustained.html)
+[![](https://github.com/Masterminds/sprig/workflows/Tests/badge.svg)](https://github.com/Masterminds/sprig/actions)
+
+The Go language comes with a [built-in template
+language](http://golang.org/pkg/text/template/), but not
+very many template functions. Sprig is a library that provides more than 100 commonly
+used template functions.
+
+It is inspired by the template functions found in
+[Twig](http://twig.sensiolabs.org/documentation) and in various
+JavaScript libraries, such as [underscore.js](http://underscorejs.org/).
+
+## IMPORTANT NOTES
+
+Sprig leverages [mergo](https://github.com/imdario/mergo) to handle merges. In
+its v0.3.9 release there was a behavior change that impacts merging template
+functions in sprig. It is currently recommended to use v0.3.8 of that package.
+Using v0.3.9 will cause sprig tests to fail. The issue in mergo is tracked at
+https://github.com/imdario/mergo/issues/139.
+
+## Package Versions
+
+There are two active major versions of the `sprig` package.
+
+* v3 is the current stable release series on the `master` branch. The Go API should
+  remain compatible with v2, the previous stable version. Behavior changes behind
+  some functions are the reason for the new major version.
+* v2 is the previous stable release series. It has been more than three years since
+  the initial release of v2. You can read the documentation and see the code
+  on the [release-2](https://github.com/Masterminds/sprig/tree/release-2) branch.
+  Bug fixes to this major version will continue for some time.
+
+## Usage
+
+**Template developers**: Please use Sprig's [function documentation](http://masterminds.github.io/sprig/) for
+detailed instructions and code snippets for the >100 template functions available.
+
+**Go developers**: If you'd like to include Sprig as a library in your program,
+our API documentation is available [at GoDoc.org](http://godoc.org/github.com/Masterminds/sprig).
+
+For standard usage, read on.
+
+### Load the Sprig library
+
+To load the Sprig `FuncMap`:
+
+```go
+
+import (
+  "github.com/Masterminds/sprig/v3"
+  "html/template"
+)
+
+// This example illustrates that the FuncMap *must* be set before the
+// templates themselves are loaded.
+tpl := template.Must(
+  template.New("base").Funcs(sprig.FuncMap()).ParseGlob("*.html"),
+)
+
+
+```
+
+### Calling the functions inside of templates
+
+By convention, all functions are lowercase. This seems to follow the Go
+idiom for template functions (as opposed to template methods, which are
+TitleCase). For example, this:
+
+```
+{{ "hello!" | upper | repeat 5 }}
+```
+
+produces this:
+
+```
+HELLO!HELLO!HELLO!HELLO!HELLO!
+``` + +## Principles Driving Our Function Selection + +We followed these principles to decide which functions to add and how to implement them: + +- Use template functions to build layout. The following + types of operations are within the domain of template functions: + - Formatting + - Layout + - Simple type conversions + - Utilities that assist in handling common formatting and layout needs (e.g. arithmetic) +- Template functions should not return errors unless there is no way to print + a sensible value. For example, converting a string to an integer should not + produce an error if conversion fails. Instead, it should display a default + value. +- Simple math is necessary for grid layouts, pagers, and so on. Complex math + (anything other than arithmetic) should be done outside of templates. +- Template functions only deal with the data passed into them. They never retrieve + data from a source. +- Finally, do not override core Go template functions. diff --git a/vendor/github.com/Masterminds/sprig/v3/crypto.go b/vendor/github.com/Masterminds/sprig/v3/crypto.go new file mode 100644 index 0000000000..13a5cd5593 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/crypto.go @@ -0,0 +1,653 @@ +package sprig + +import ( + "bytes" + "crypto" + "crypto/aes" + "crypto/cipher" + "crypto/dsa" + "crypto/ecdsa" + "crypto/ed25519" + "crypto/elliptic" + "crypto/hmac" + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "crypto/x509/pkix" + "encoding/asn1" + "encoding/base64" + "encoding/binary" + "encoding/hex" + "encoding/pem" + "errors" + "fmt" + "hash/adler32" + "io" + "math/big" + "net" + "time" + + "strings" + + "github.com/google/uuid" + bcrypt_lib "golang.org/x/crypto/bcrypt" + "golang.org/x/crypto/scrypt" +) + +func sha256sum(input string) string { + hash := sha256.Sum256([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func sha1sum(input string) string { + hash := sha1.Sum([]byte(input)) + return hex.EncodeToString(hash[:]) +} + +func adler32sum(input string) string { + hash := adler32.Checksum([]byte(input)) + return fmt.Sprintf("%d", hash) +} + +func bcrypt(input string) string { + hash, err := bcrypt_lib.GenerateFromPassword([]byte(input), bcrypt_lib.DefaultCost) + if err != nil { + return fmt.Sprintf("failed to encrypt string with bcrypt: %s", err) + } + + return string(hash) +} + +func htpasswd(username string, password string) string { + if strings.Contains(username, ":") { + return fmt.Sprintf("invalid username: %s", username) + } + return fmt.Sprintf("%s:%s", username, bcrypt(password)) +} + +func randBytes(count int) (string, error) { + buf := make([]byte, count) + if _, err := rand.Read(buf); err != nil { + return "", err + } + return base64.StdEncoding.EncodeToString(buf), nil +} + +// uuidv4 provides a safe and secure UUID v4 implementation +func uuidv4() string { + return uuid.New().String() +} + +var masterPasswordSeed = "com.lyndir.masterpassword" + +var passwordTypeTemplates = map[string][][]byte{ + "maximum": {[]byte("anoxxxxxxxxxxxxxxxxx"), []byte("axxxxxxxxxxxxxxxxxno")}, + "long": {[]byte("CvcvnoCvcvCvcv"), []byte("CvcvCvcvnoCvcv"), []byte("CvcvCvcvCvcvno"), []byte("CvccnoCvcvCvcv"), []byte("CvccCvcvnoCvcv"), + []byte("CvccCvcvCvcvno"), []byte("CvcvnoCvccCvcv"), []byte("CvcvCvccnoCvcv"), []byte("CvcvCvccCvcvno"), []byte("CvcvnoCvcvCvcc"), + []byte("CvcvCvcvnoCvcc"), []byte("CvcvCvcvCvccno"), []byte("CvccnoCvccCvcv"), []byte("CvccCvccnoCvcv"), []byte("CvccCvccCvcvno"), + 
[]byte("CvcvnoCvccCvcc"), []byte("CvcvCvccnoCvcc"), []byte("CvcvCvccCvccno"), []byte("CvccnoCvcvCvcc"), []byte("CvccCvcvnoCvcc"), + []byte("CvccCvcvCvccno")}, + "medium": {[]byte("CvcnoCvc"), []byte("CvcCvcno")}, + "short": {[]byte("Cvcn")}, + "basic": {[]byte("aaanaaan"), []byte("aannaaan"), []byte("aaannaaa")}, + "pin": {[]byte("nnnn")}, +} + +var templateCharacters = map[byte]string{ + 'V': "AEIOU", + 'C': "BCDFGHJKLMNPQRSTVWXYZ", + 'v': "aeiou", + 'c': "bcdfghjklmnpqrstvwxyz", + 'A': "AEIOUBCDFGHJKLMNPQRSTVWXYZ", + 'a': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz", + 'n': "0123456789", + 'o': "@&%?,=[]_:-+*$#!'^~;()/.", + 'x': "AEIOUaeiouBCDFGHJKLMNPQRSTVWXYZbcdfghjklmnpqrstvwxyz0123456789!@#$%^&*()", +} + +func derivePassword(counter uint32, passwordType, password, user, site string) string { + var templates = passwordTypeTemplates[passwordType] + if templates == nil { + return fmt.Sprintf("cannot find password template %s", passwordType) + } + + var buffer bytes.Buffer + buffer.WriteString(masterPasswordSeed) + binary.Write(&buffer, binary.BigEndian, uint32(len(user))) + buffer.WriteString(user) + + salt := buffer.Bytes() + key, err := scrypt.Key([]byte(password), salt, 32768, 8, 2, 64) + if err != nil { + return fmt.Sprintf("failed to derive password: %s", err) + } + + buffer.Truncate(len(masterPasswordSeed)) + binary.Write(&buffer, binary.BigEndian, uint32(len(site))) + buffer.WriteString(site) + binary.Write(&buffer, binary.BigEndian, counter) + + var hmacv = hmac.New(sha256.New, key) + hmacv.Write(buffer.Bytes()) + var seed = hmacv.Sum(nil) + var temp = templates[int(seed[0])%len(templates)] + + buffer.Truncate(0) + for i, element := range temp { + passChars := templateCharacters[element] + passChar := passChars[int(seed[i+1])%len(passChars)] + buffer.WriteByte(passChar) + } + + return buffer.String() +} + +func generatePrivateKey(typ string) string { + var priv interface{} + var err error + switch typ { + case "", "rsa": + // good enough for government work + priv, err = rsa.GenerateKey(rand.Reader, 4096) + case "dsa": + key := new(dsa.PrivateKey) + // again, good enough for government work + if err = dsa.GenerateParameters(&key.Parameters, rand.Reader, dsa.L2048N256); err != nil { + return fmt.Sprintf("failed to generate dsa params: %s", err) + } + err = dsa.GenerateKey(key, rand.Reader) + priv = key + case "ecdsa": + // again, good enough for government work + priv, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + case "ed25519": + _, priv, err = ed25519.GenerateKey(rand.Reader) + default: + return "Unknown type " + typ + } + if err != nil { + return fmt.Sprintf("failed to generate private key: %s", err) + } + + return string(pem.EncodeToMemory(pemBlockForKey(priv))) +} + +// DSAKeyFormat stores the format for DSA keys. 
+// Used by pemBlockForKey +type DSAKeyFormat struct { + Version int + P, Q, G, Y, X *big.Int +} + +func pemBlockForKey(priv interface{}) *pem.Block { + switch k := priv.(type) { + case *rsa.PrivateKey: + return &pem.Block{Type: "RSA PRIVATE KEY", Bytes: x509.MarshalPKCS1PrivateKey(k)} + case *dsa.PrivateKey: + val := DSAKeyFormat{ + P: k.P, Q: k.Q, G: k.G, + Y: k.Y, X: k.X, + } + bytes, _ := asn1.Marshal(val) + return &pem.Block{Type: "DSA PRIVATE KEY", Bytes: bytes} + case *ecdsa.PrivateKey: + b, _ := x509.MarshalECPrivateKey(k) + return &pem.Block{Type: "EC PRIVATE KEY", Bytes: b} + default: + // attempt PKCS#8 format for all other keys + b, err := x509.MarshalPKCS8PrivateKey(k) + if err != nil { + return nil + } + return &pem.Block{Type: "PRIVATE KEY", Bytes: b} + } +} + +func parsePrivateKeyPEM(pemBlock string) (crypto.PrivateKey, error) { + block, _ := pem.Decode([]byte(pemBlock)) + if block == nil { + return nil, errors.New("no PEM data in input") + } + + if block.Type == "PRIVATE KEY" { + priv, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("decoding PEM as PKCS#8: %s", err) + } + return priv, nil + } else if !strings.HasSuffix(block.Type, " PRIVATE KEY") { + return nil, fmt.Errorf("no private key data in PEM block of type %s", block.Type) + } + + switch block.Type[:len(block.Type)-12] { // strip " PRIVATE KEY" + case "RSA": + priv, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing RSA private key from PEM: %s", err) + } + return priv, nil + case "EC": + priv, err := x509.ParseECPrivateKey(block.Bytes) + if err != nil { + return nil, fmt.Errorf("parsing EC private key from PEM: %s", err) + } + return priv, nil + case "DSA": + var k DSAKeyFormat + _, err := asn1.Unmarshal(block.Bytes, &k) + if err != nil { + return nil, fmt.Errorf("parsing DSA private key from PEM: %s", err) + } + priv := &dsa.PrivateKey{ + PublicKey: dsa.PublicKey{ + Parameters: dsa.Parameters{ + P: k.P, Q: k.Q, G: k.G, + }, + Y: k.Y, + }, + X: k.X, + } + return priv, nil + default: + return nil, fmt.Errorf("invalid private key type %s", block.Type) + } +} + +func getPublicKey(priv crypto.PrivateKey) (crypto.PublicKey, error) { + switch k := priv.(type) { + case interface{ Public() crypto.PublicKey }: + return k.Public(), nil + case *dsa.PrivateKey: + return &k.PublicKey, nil + default: + return nil, fmt.Errorf("unable to get public key for type %T", priv) + } +} + +type certificate struct { + Cert string + Key string +} + +func buildCustomCertificate(b64cert string, b64key string) (certificate, error) { + crt := certificate{} + + cert, err := base64.StdEncoding.DecodeString(b64cert) + if err != nil { + return crt, errors.New("unable to decode base64 certificate") + } + + key, err := base64.StdEncoding.DecodeString(b64key) + if err != nil { + return crt, errors.New("unable to decode base64 private key") + } + + decodedCert, _ := pem.Decode(cert) + if decodedCert == nil { + return crt, errors.New("unable to decode certificate") + } + _, err = x509.ParseCertificate(decodedCert.Bytes) + if err != nil { + return crt, fmt.Errorf( + "error parsing certificate: decodedCert.Bytes: %s", + err, + ) + } + + _, err = parsePrivateKeyPEM(string(key)) + if err != nil { + return crt, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + crt.Cert = string(cert) + crt.Key = string(key) + + return crt, nil +} + +func generateCertificateAuthority( + cn string, + daysValid int, +) (certificate, error) { + priv, err := 
rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithPEMKey( + cn string, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateCertificateAuthorityWithKeyInternal(cn, daysValid, priv) +} + +func generateCertificateAuthorityWithKeyInternal( + cn string, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + ca := certificate{} + + template, err := getBaseCertTemplate(cn, nil, nil, daysValid) + if err != nil { + return ca, err + } + // Override KeyUsage and IsCA + template.KeyUsage = x509.KeyUsageKeyEncipherment | + x509.KeyUsageDigitalSignature | + x509.KeyUsageCertSign + template.IsCA = true + + ca.Cert, ca.Key, err = getCertAndKey(template, priv, template, priv) + + return ca, err +} + +func generateSelfSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSelfSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, priv) +} + +func generateSelfSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey(template, priv, template, priv) + + return cert, err +} + +func generateSignedCertificate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, +) (certificate, error) { + priv, err := rsa.GenerateKey(rand.Reader, 2048) + if err != nil { + return certificate{}, fmt.Errorf("error generating rsa key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithPEMKey( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + privPEM string, +) (certificate, error) { + priv, err := parsePrivateKeyPEM(privPEM) + if err != nil { + return certificate{}, fmt.Errorf("parsing private key: %s", err) + } + return generateSignedCertificateWithKeyInternal(cn, ips, alternateDNS, daysValid, ca, priv) +} + +func generateSignedCertificateWithKeyInternal( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, + ca certificate, + priv crypto.PrivateKey, +) (certificate, error) { + cert := certificate{} + + decodedSignerCert, _ := pem.Decode([]byte(ca.Cert)) + if decodedSignerCert == nil { + return cert, errors.New("unable to decode certificate") + } + signerCert, err := x509.ParseCertificate(decodedSignerCert.Bytes) + if err != 
nil { + return cert, fmt.Errorf( + "error parsing certificate: decodedSignerCert.Bytes: %s", + err, + ) + } + signerKey, err := parsePrivateKeyPEM(ca.Key) + if err != nil { + return cert, fmt.Errorf( + "error parsing private key: %s", + err, + ) + } + + template, err := getBaseCertTemplate(cn, ips, alternateDNS, daysValid) + if err != nil { + return cert, err + } + + cert.Cert, cert.Key, err = getCertAndKey( + template, + priv, + signerCert, + signerKey, + ) + + return cert, err +} + +func getCertAndKey( + template *x509.Certificate, + signeeKey crypto.PrivateKey, + parent *x509.Certificate, + signingKey crypto.PrivateKey, +) (string, string, error) { + signeePubKey, err := getPublicKey(signeeKey) + if err != nil { + return "", "", fmt.Errorf("error retrieving public key from signee key: %s", err) + } + derBytes, err := x509.CreateCertificate( + rand.Reader, + template, + parent, + signeePubKey, + signingKey, + ) + if err != nil { + return "", "", fmt.Errorf("error creating certificate: %s", err) + } + + certBuffer := bytes.Buffer{} + if err := pem.Encode( + &certBuffer, + &pem.Block{Type: "CERTIFICATE", Bytes: derBytes}, + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding certificate: %s", err) + } + + keyBuffer := bytes.Buffer{} + if err := pem.Encode( + &keyBuffer, + pemBlockForKey(signeeKey), + ); err != nil { + return "", "", fmt.Errorf("error pem-encoding key: %s", err) + } + + return certBuffer.String(), keyBuffer.String(), nil +} + +func getBaseCertTemplate( + cn string, + ips []interface{}, + alternateDNS []interface{}, + daysValid int, +) (*x509.Certificate, error) { + ipAddresses, err := getNetIPs(ips) + if err != nil { + return nil, err + } + dnsNames, err := getAlternateDNSStrs(alternateDNS) + if err != nil { + return nil, err + } + serialNumberUpperBound := new(big.Int).Lsh(big.NewInt(1), 128) + serialNumber, err := rand.Int(rand.Reader, serialNumberUpperBound) + if err != nil { + return nil, err + } + return &x509.Certificate{ + SerialNumber: serialNumber, + Subject: pkix.Name{ + CommonName: cn, + }, + IPAddresses: ipAddresses, + DNSNames: dnsNames, + NotBefore: time.Now(), + NotAfter: time.Now().Add(time.Hour * 24 * time.Duration(daysValid)), + KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature, + ExtKeyUsage: []x509.ExtKeyUsage{ + x509.ExtKeyUsageServerAuth, + x509.ExtKeyUsageClientAuth, + }, + BasicConstraintsValid: true, + }, nil +} + +func getNetIPs(ips []interface{}) ([]net.IP, error) { + if ips == nil { + return []net.IP{}, nil + } + var ipStr string + var ok bool + var netIP net.IP + netIPs := make([]net.IP, len(ips)) + for i, ip := range ips { + ipStr, ok = ip.(string) + if !ok { + return nil, fmt.Errorf("error parsing ip: %v is not a string", ip) + } + netIP = net.ParseIP(ipStr) + if netIP == nil { + return nil, fmt.Errorf("error parsing ip: %s", ipStr) + } + netIPs[i] = netIP + } + return netIPs, nil +} + +func getAlternateDNSStrs(alternateDNS []interface{}) ([]string, error) { + if alternateDNS == nil { + return []string{}, nil + } + var dnsStr string + var ok bool + alternateDNSStrs := make([]string, len(alternateDNS)) + for i, dns := range alternateDNS { + dnsStr, ok = dns.(string) + if !ok { + return nil, fmt.Errorf( + "error processing alternate dns name: %v is not a string", + dns, + ) + } + alternateDNSStrs[i] = dnsStr + } + return alternateDNSStrs, nil +} + +func encryptAES(password string, plaintext string) (string, error) { + if plaintext == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, 
[]byte(password)) + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + content := []byte(plaintext) + blockSize := block.BlockSize() + padding := blockSize - len(content)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + content = append(content, padtext...) + + ciphertext := make([]byte, aes.BlockSize+len(content)) + + iv := ciphertext[:aes.BlockSize] + if _, err := io.ReadFull(rand.Reader, iv); err != nil { + return "", err + } + + mode := cipher.NewCBCEncrypter(block, iv) + mode.CryptBlocks(ciphertext[aes.BlockSize:], content) + + return base64.StdEncoding.EncodeToString(ciphertext), nil +} + +func decryptAES(password string, crypt64 string) (string, error) { + if crypt64 == "" { + return "", nil + } + + key := make([]byte, 32) + copy(key, []byte(password)) + + crypt, err := base64.StdEncoding.DecodeString(crypt64) + if err != nil { + return "", err + } + + block, err := aes.NewCipher(key) + if err != nil { + return "", err + } + + iv := crypt[:aes.BlockSize] + crypt = crypt[aes.BlockSize:] + decrypted := make([]byte, len(crypt)) + mode := cipher.NewCBCDecrypter(block, iv) + mode.CryptBlocks(decrypted, crypt) + + return string(decrypted[:len(decrypted)-int(decrypted[len(decrypted)-1])]), nil +} diff --git a/vendor/github.com/Masterminds/sprig/v3/date.go b/vendor/github.com/Masterminds/sprig/v3/date.go new file mode 100644 index 0000000000..ed022ddaca --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/date.go @@ -0,0 +1,152 @@ +package sprig + +import ( + "strconv" + "time" +) + +// Given a format and a date, format the date string. +// +// Date can be a `time.Time` or an `int, int32, int64`. +// In the later case, it is treated as seconds since UNIX +// epoch. +func date(fmt string, date interface{}) string { + return dateInZone(fmt, date, "Local") +} + +func htmlDate(date interface{}) string { + return dateInZone("2006-01-02", date, "Local") +} + +func htmlDateInZone(date interface{}, zone string) string { + return dateInZone("2006-01-02", date, zone) +} + +func dateInZone(fmt string, date interface{}, zone string) string { + var t time.Time + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case *time.Time: + t = *date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + case int32: + t = time.Unix(int64(date), 0) + } + + loc, err := time.LoadLocation(zone) + if err != nil { + loc, _ = time.LoadLocation("UTC") + } + + return t.In(loc).Format(fmt) +} + +func dateModify(fmt string, date time.Time) time.Time { + d, err := time.ParseDuration(fmt) + if err != nil { + return date + } + return date.Add(d) +} + +func mustDateModify(fmt string, date time.Time) (time.Time, error) { + d, err := time.ParseDuration(fmt) + if err != nil { + return time.Time{}, err + } + return date.Add(d), nil +} + +func dateAgo(date interface{}) string { + var t time.Time + + switch date := date.(type) { + default: + t = time.Now() + case time.Time: + t = date + case int64: + t = time.Unix(date, 0) + case int: + t = time.Unix(int64(date), 0) + } + // Drop resolution to seconds + duration := time.Since(t).Round(time.Second) + return duration.String() +} + +func duration(sec interface{}) string { + var n int64 + switch value := sec.(type) { + default: + n = 0 + case string: + n, _ = strconv.ParseInt(value, 10, 64) + case int64: + n = value + } + return (time.Duration(n) * time.Second).String() +} + +func durationRound(duration interface{}) string { + var d time.Duration 
+ switch duration := duration.(type) { + default: + d = 0 + case string: + d, _ = time.ParseDuration(duration) + case int64: + d = time.Duration(duration) + case time.Time: + d = time.Since(duration) + } + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + } + + var ( + year = uint64(time.Hour) * 24 * 365 + month = uint64(time.Hour) * 24 * 30 + day = uint64(time.Hour) * 24 + hour = uint64(time.Hour) + minute = uint64(time.Minute) + second = uint64(time.Second) + ) + switch { + case u > year: + return strconv.FormatUint(u/year, 10) + "y" + case u > month: + return strconv.FormatUint(u/month, 10) + "mo" + case u > day: + return strconv.FormatUint(u/day, 10) + "d" + case u > hour: + return strconv.FormatUint(u/hour, 10) + "h" + case u > minute: + return strconv.FormatUint(u/minute, 10) + "m" + case u > second: + return strconv.FormatUint(u/second, 10) + "s" + } + return "0s" +} + +func toDate(fmt, str string) time.Time { + t, _ := time.ParseInLocation(fmt, str, time.Local) + return t +} + +func mustToDate(fmt, str string) (time.Time, error) { + return time.ParseInLocation(fmt, str, time.Local) +} + +func unixEpoch(date time.Time) string { + return strconv.FormatInt(date.Unix(), 10) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/defaults.go b/vendor/github.com/Masterminds/sprig/v3/defaults.go new file mode 100644 index 0000000000..b9f979666d --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/defaults.go @@ -0,0 +1,163 @@ +package sprig + +import ( + "bytes" + "encoding/json" + "math/rand" + "reflect" + "strings" + "time" +) + +func init() { + rand.Seed(time.Now().UnixNano()) +} + +// dfault checks whether `given` is set, and returns default if not set. +// +// This returns `d` if `given` appears not to be set, and `given` otherwise. +// +// For numeric types 0 is unset. +// For strings, maps, arrays, and slices, len() = 0 is considered unset. +// For bool, false is unset. +// Structs are never considered unset. +// +// For everything else, including pointers, a nil value is unset. +func dfault(d interface{}, given ...interface{}) interface{} { + + if empty(given) || empty(given[0]) { + return d + } + return given[0] +} + +// empty returns true if the given value has the zero value for its type. +func empty(given interface{}) bool { + g := reflect.ValueOf(given) + if !g.IsValid() { + return true + } + + // Basically adapted from text/template.isTrue + switch g.Kind() { + default: + return g.IsNil() + case reflect.Array, reflect.Slice, reflect.Map, reflect.String: + return g.Len() == 0 + case reflect.Bool: + return !g.Bool() + case reflect.Complex64, reflect.Complex128: + return g.Complex() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return g.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return g.Uint() == 0 + case reflect.Float32, reflect.Float64: + return g.Float() == 0 + case reflect.Struct: + return false + } +} + +// coalesce returns the first non-empty value. +func coalesce(v ...interface{}) interface{} { + for _, val := range v { + if !empty(val) { + return val + } + } + return nil +} + +// all returns true if empty(x) is false for all values x in the list. +// If the list is empty, return true. +func all(v ...interface{}) bool { + for _, val := range v { + if empty(val) { + return false + } + } + return true +} + +// any returns true if empty(x) is false for any x in the list. +// If the list is empty, return false. 
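+//
+// Illustrative template usage (the field names are placeholders):
+// {{ if any .a .b }}...{{ end }} renders its body when at least one of
+// the two values is non-empty.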
+func any(v ...interface{}) bool { + for _, val := range v { + if !empty(val) { + return true + } + } + return false +} + +// fromJson decodes JSON into a structured value, ignoring errors. +func fromJson(v string) interface{} { + output, _ := mustFromJson(v) + return output +} + +// mustFromJson decodes JSON into a structured value, returning errors. +func mustFromJson(v string) (interface{}, error) { + var output interface{} + err := json.Unmarshal([]byte(v), &output) + return output, err +} + +// toJson encodes an item into a JSON string +func toJson(v interface{}) string { + output, _ := json.Marshal(v) + return string(output) +} + +func mustToJson(v interface{}) (string, error) { + output, err := json.Marshal(v) + if err != nil { + return "", err + } + return string(output), nil +} + +// toPrettyJson encodes an item into a pretty (indented) JSON string +func toPrettyJson(v interface{}) string { + output, _ := json.MarshalIndent(v, "", " ") + return string(output) +} + +func mustToPrettyJson(v interface{}) (string, error) { + output, err := json.MarshalIndent(v, "", " ") + if err != nil { + return "", err + } + return string(output), nil +} + +// toRawJson encodes an item into a JSON string with no escaping of HTML characters. +func toRawJson(v interface{}) string { + output, err := mustToRawJson(v) + if err != nil { + panic(err) + } + return string(output) +} + +// mustToRawJson encodes an item into a JSON string with no escaping of HTML characters. +func mustToRawJson(v interface{}) (string, error) { + buf := new(bytes.Buffer) + enc := json.NewEncoder(buf) + enc.SetEscapeHTML(false) + err := enc.Encode(&v) + if err != nil { + return "", err + } + return strings.TrimSuffix(buf.String(), "\n"), nil +} + +// ternary returns the first value if the last value is true, otherwise returns the second value. 
+func ternary(vt interface{}, vf interface{}, v bool) interface{} { + if v { + return vt + } + + return vf +} diff --git a/vendor/github.com/Masterminds/sprig/v3/dict.go b/vendor/github.com/Masterminds/sprig/v3/dict.go new file mode 100644 index 0000000000..ade8896984 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/dict.go @@ -0,0 +1,174 @@ +package sprig + +import ( + "github.com/imdario/mergo" + "github.com/mitchellh/copystructure" +) + +func get(d map[string]interface{}, key string) interface{} { + if val, ok := d[key]; ok { + return val + } + return "" +} + +func set(d map[string]interface{}, key string, value interface{}) map[string]interface{} { + d[key] = value + return d +} + +func unset(d map[string]interface{}, key string) map[string]interface{} { + delete(d, key) + return d +} + +func hasKey(d map[string]interface{}, key string) bool { + _, ok := d[key] + return ok +} + +func pluck(key string, d ...map[string]interface{}) []interface{} { + res := []interface{}{} + for _, dict := range d { + if val, ok := dict[key]; ok { + res = append(res, val) + } + } + return res +} + +func keys(dicts ...map[string]interface{}) []string { + k := []string{} + for _, dict := range dicts { + for key := range dict { + k = append(k, key) + } + } + return k +} + +func pick(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + for _, k := range keys { + if v, ok := dict[k]; ok { + res[k] = v + } + } + return res +} + +func omit(dict map[string]interface{}, keys ...string) map[string]interface{} { + res := map[string]interface{}{} + + omit := make(map[string]bool, len(keys)) + for _, k := range keys { + omit[k] = true + } + + for k, v := range dict { + if _, ok := omit[k]; !ok { + res[k] = v + } + } + return res +} + +func dict(v ...interface{}) map[string]interface{} { + dict := map[string]interface{}{} + lenv := len(v) + for i := 0; i < lenv; i += 2 { + key := strval(v[i]) + if i+1 >= lenv { + dict[key] = "" + continue + } + dict[key] = v[i+1] + } + return dict +} + +func merge(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + // Swallow errors inside of a template. + return "" + } + } + return dst +} + +func mustMerge(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.Merge(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func mergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) interface{} { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + // Swallow errors inside of a template. 
+ return "" + } + } + return dst +} + +func mustMergeOverwrite(dst map[string]interface{}, srcs ...map[string]interface{}) (interface{}, error) { + for _, src := range srcs { + if err := mergo.MergeWithOverwrite(&dst, src); err != nil { + return nil, err + } + } + return dst, nil +} + +func values(dict map[string]interface{}) []interface{} { + values := []interface{}{} + for _, value := range dict { + values = append(values, value) + } + + return values +} + +func deepCopy(i interface{}) interface{} { + c, err := mustDeepCopy(i) + if err != nil { + panic("deepCopy error: " + err.Error()) + } + + return c +} + +func mustDeepCopy(i interface{}) (interface{}, error) { + return copystructure.Copy(i) +} + +func dig(ps ...interface{}) (interface{}, error) { + if len(ps) < 3 { + panic("dig needs at least three arguments") + } + dict := ps[len(ps)-1].(map[string]interface{}) + def := ps[len(ps)-2] + ks := make([]string, len(ps)-2) + for i := 0; i < len(ks); i++ { + ks[i] = ps[i].(string) + } + + return digFromDict(dict, def, ks) +} + +func digFromDict(dict map[string]interface{}, d interface{}, ks []string) (interface{}, error) { + k, ns := ks[0], ks[1:len(ks)] + step, has := dict[k] + if !has { + return d, nil + } + if len(ns) == 0 { + return step, nil + } + return digFromDict(step.(map[string]interface{}), d, ns) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/doc.go b/vendor/github.com/Masterminds/sprig/v3/doc.go new file mode 100644 index 0000000000..aabb9d4489 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/doc.go @@ -0,0 +1,19 @@ +/* +Package sprig provides template functions for Go. + +This package contains a number of utility functions for working with data +inside of Go `html/template` and `text/template` files. + +To add these functions, use the `template.Funcs()` method: + + t := templates.New("foo").Funcs(sprig.FuncMap()) + +Note that you should add the function map before you parse any template files. + + In several cases, Sprig reverses the order of arguments from the way they + appear in the standard library. This is to make it easier to pipe + arguments into functions. + +See http://masterminds.github.io/sprig/ for more detailed documentation on each of the available functions. +*/ +package sprig diff --git a/vendor/github.com/Masterminds/sprig/v3/functions.go b/vendor/github.com/Masterminds/sprig/v3/functions.go new file mode 100644 index 0000000000..57fcec1d9e --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/functions.go @@ -0,0 +1,382 @@ +package sprig + +import ( + "errors" + "html/template" + "math/rand" + "os" + "path" + "path/filepath" + "reflect" + "strconv" + "strings" + ttemplate "text/template" + "time" + + util "github.com/Masterminds/goutils" + "github.com/huandu/xstrings" + "github.com/shopspring/decimal" +) + +// FuncMap produces the function map. +// +// Use this to pass the functions into the template engine: +// +// tpl := template.New("foo").Funcs(sprig.FuncMap())) +// +func FuncMap() template.FuncMap { + return HtmlFuncMap() +} + +// HermeticTxtFuncMap returns a 'text/template'.FuncMap with only repeatable functions. +func HermeticTxtFuncMap() ttemplate.FuncMap { + r := TxtFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// HermeticHtmlFuncMap returns an 'html/template'.Funcmap with only repeatable functions. 
+func HermeticHtmlFuncMap() template.FuncMap { + r := HtmlFuncMap() + for _, name := range nonhermeticFunctions { + delete(r, name) + } + return r +} + +// TxtFuncMap returns a 'text/template'.FuncMap +func TxtFuncMap() ttemplate.FuncMap { + return ttemplate.FuncMap(GenericFuncMap()) +} + +// HtmlFuncMap returns an 'html/template'.Funcmap +func HtmlFuncMap() template.FuncMap { + return template.FuncMap(GenericFuncMap()) +} + +// GenericFuncMap returns a copy of the basic function map as a map[string]interface{}. +func GenericFuncMap() map[string]interface{} { + gfm := make(map[string]interface{}, len(genericMap)) + for k, v := range genericMap { + gfm[k] = v + } + return gfm +} + +// These functions are not guaranteed to evaluate to the same result for given input, because they +// refer to the environment or global state. +var nonhermeticFunctions = []string{ + // Date functions + "date", + "date_in_zone", + "date_modify", + "now", + "htmlDate", + "htmlDateInZone", + "dateInZone", + "dateModify", + + // Strings + "randAlphaNum", + "randAlpha", + "randAscii", + "randNumeric", + "randBytes", + "uuidv4", + + // OS + "env", + "expandenv", + + // Network + "getHostByName", +} + +var genericMap = map[string]interface{}{ + "hello": func() string { return "Hello!" }, + + // Date functions + "ago": dateAgo, + "date": date, + "date_in_zone": dateInZone, + "date_modify": dateModify, + "dateInZone": dateInZone, + "dateModify": dateModify, + "duration": duration, + "durationRound": durationRound, + "htmlDate": htmlDate, + "htmlDateInZone": htmlDateInZone, + "must_date_modify": mustDateModify, + "mustDateModify": mustDateModify, + "mustToDate": mustToDate, + "now": time.Now, + "toDate": toDate, + "unixEpoch": unixEpoch, + + // Strings + "abbrev": abbrev, + "abbrevboth": abbrevboth, + "trunc": trunc, + "trim": strings.TrimSpace, + "upper": strings.ToUpper, + "lower": strings.ToLower, + "title": strings.Title, + "untitle": untitle, + "substr": substring, + // Switch order so that "foo" | repeat 5 + "repeat": func(count int, str string) string { return strings.Repeat(str, count) }, + // Deprecated: Use trimAll. 
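+	// For example, {{ "$5.00$" | trimall "$" }} yields "5.00".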
+ "trimall": func(a, b string) string { return strings.Trim(b, a) }, + // Switch order so that "$foo" | trimall "$" + "trimAll": func(a, b string) string { return strings.Trim(b, a) }, + "trimSuffix": func(a, b string) string { return strings.TrimSuffix(b, a) }, + "trimPrefix": func(a, b string) string { return strings.TrimPrefix(b, a) }, + "nospace": util.DeleteWhiteSpace, + "initials": initials, + "randAlphaNum": randAlphaNumeric, + "randAlpha": randAlpha, + "randAscii": randAscii, + "randNumeric": randNumeric, + "swapcase": util.SwapCase, + "shuffle": xstrings.Shuffle, + "snakecase": xstrings.ToSnakeCase, + "camelcase": xstrings.ToCamelCase, + "kebabcase": xstrings.ToKebabCase, + "wrap": func(l int, s string) string { return util.Wrap(s, l) }, + "wrapWith": func(l int, sep, str string) string { return util.WrapCustom(str, l, sep, true) }, + // Switch order so that "foobar" | contains "foo" + "contains": func(substr string, str string) bool { return strings.Contains(str, substr) }, + "hasPrefix": func(substr string, str string) bool { return strings.HasPrefix(str, substr) }, + "hasSuffix": func(substr string, str string) bool { return strings.HasSuffix(str, substr) }, + "quote": quote, + "squote": squote, + "cat": cat, + "indent": indent, + "nindent": nindent, + "replace": replace, + "plural": plural, + "sha1sum": sha1sum, + "sha256sum": sha256sum, + "adler32sum": adler32sum, + "toString": strval, + + // Wrap Atoi to stop errors. + "atoi": func(a string) int { i, _ := strconv.Atoi(a); return i }, + "int64": toInt64, + "int": toInt, + "float64": toFloat64, + "seq": seq, + "toDecimal": toDecimal, + + //"gt": func(a, b int) bool {return a > b}, + //"gte": func(a, b int) bool {return a >= b}, + //"lt": func(a, b int) bool {return a < b}, + //"lte": func(a, b int) bool {return a <= b}, + + // split "/" foo/bar returns map[int]string{0: foo, 1: bar} + "split": split, + "splitList": func(sep, orig string) []string { return strings.Split(orig, sep) }, + // splitn "/" foo/bar/fuu returns map[int]string{0: foo, 1: bar/fuu} + "splitn": splitn, + "toStrings": strslice, + + "until": until, + "untilStep": untilStep, + + // VERY basic arithmetic. 
+ "add1": func(i interface{}) int64 { return toInt64(i) + 1 }, + "add": func(i ...interface{}) int64 { + var a int64 = 0 + for _, b := range i { + a += toInt64(b) + } + return a + }, + "sub": func(a, b interface{}) int64 { return toInt64(a) - toInt64(b) }, + "div": func(a, b interface{}) int64 { return toInt64(a) / toInt64(b) }, + "mod": func(a, b interface{}) int64 { return toInt64(a) % toInt64(b) }, + "mul": func(a interface{}, v ...interface{}) int64 { + val := toInt64(a) + for _, b := range v { + val = val * toInt64(b) + } + return val + }, + "randInt": func(min, max int) int { return rand.Intn(max-min) + min }, + "add1f": func(i interface{}) float64 { + return execDecimalOp(i, []interface{}{1}, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "addf": func(i ...interface{}) float64 { + a := interface{}(float64(0)) + return execDecimalOp(a, i, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Add(d2) }) + }, + "subf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Sub(d2) }) + }, + "divf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Div(d2) }) + }, + "mulf": func(a interface{}, v ...interface{}) float64 { + return execDecimalOp(a, v, func(d1, d2 decimal.Decimal) decimal.Decimal { return d1.Mul(d2) }) + }, + "biggest": max, + "max": max, + "min": min, + "maxf": maxf, + "minf": minf, + "ceil": ceil, + "floor": floor, + "round": round, + + // string slices. Note that we reverse the order b/c that's better + // for template processing. + "join": join, + "sortAlpha": sortAlpha, + + // Defaults + "default": dfault, + "empty": empty, + "coalesce": coalesce, + "all": all, + "any": any, + "compact": compact, + "mustCompact": mustCompact, + "fromJson": fromJson, + "toJson": toJson, + "toPrettyJson": toPrettyJson, + "toRawJson": toRawJson, + "mustFromJson": mustFromJson, + "mustToJson": mustToJson, + "mustToPrettyJson": mustToPrettyJson, + "mustToRawJson": mustToRawJson, + "ternary": ternary, + "deepCopy": deepCopy, + "mustDeepCopy": mustDeepCopy, + + // Reflection + "typeOf": typeOf, + "typeIs": typeIs, + "typeIsLike": typeIsLike, + "kindOf": kindOf, + "kindIs": kindIs, + "deepEqual": reflect.DeepEqual, + + // OS: + "env": os.Getenv, + "expandenv": os.ExpandEnv, + + // Network: + "getHostByName": getHostByName, + + // Paths: + "base": path.Base, + "dir": path.Dir, + "clean": path.Clean, + "ext": path.Ext, + "isAbs": path.IsAbs, + + // Filepaths: + "osBase": filepath.Base, + "osClean": filepath.Clean, + "osDir": filepath.Dir, + "osExt": filepath.Ext, + "osIsAbs": filepath.IsAbs, + + // Encoding: + "b64enc": base64encode, + "b64dec": base64decode, + "b32enc": base32encode, + "b32dec": base32decode, + + // Data Structures: + "tuple": list, // FIXME: with the addition of append/prepend these are no longer immutable. 
+ "list": list, + "dict": dict, + "get": get, + "set": set, + "unset": unset, + "hasKey": hasKey, + "pluck": pluck, + "keys": keys, + "pick": pick, + "omit": omit, + "merge": merge, + "mergeOverwrite": mergeOverwrite, + "mustMerge": mustMerge, + "mustMergeOverwrite": mustMergeOverwrite, + "values": values, + + "append": push, "push": push, + "mustAppend": mustPush, "mustPush": mustPush, + "prepend": prepend, + "mustPrepend": mustPrepend, + "first": first, + "mustFirst": mustFirst, + "rest": rest, + "mustRest": mustRest, + "last": last, + "mustLast": mustLast, + "initial": initial, + "mustInitial": mustInitial, + "reverse": reverse, + "mustReverse": mustReverse, + "uniq": uniq, + "mustUniq": mustUniq, + "without": without, + "mustWithout": mustWithout, + "has": has, + "mustHas": mustHas, + "slice": slice, + "mustSlice": mustSlice, + "concat": concat, + "dig": dig, + "chunk": chunk, + "mustChunk": mustChunk, + + // Crypto: + "bcrypt": bcrypt, + "htpasswd": htpasswd, + "genPrivateKey": generatePrivateKey, + "derivePassword": derivePassword, + "buildCustomCert": buildCustomCertificate, + "genCA": generateCertificateAuthority, + "genCAWithKey": generateCertificateAuthorityWithPEMKey, + "genSelfSignedCert": generateSelfSignedCertificate, + "genSelfSignedCertWithKey": generateSelfSignedCertificateWithPEMKey, + "genSignedCert": generateSignedCertificate, + "genSignedCertWithKey": generateSignedCertificateWithPEMKey, + "encryptAES": encryptAES, + "decryptAES": decryptAES, + "randBytes": randBytes, + + // UUIDs: + "uuidv4": uuidv4, + + // SemVer: + "semver": semver, + "semverCompare": semverCompare, + + // Flow Control: + "fail": func(msg string) (string, error) { return "", errors.New(msg) }, + + // Regex + "regexMatch": regexMatch, + "mustRegexMatch": mustRegexMatch, + "regexFindAll": regexFindAll, + "mustRegexFindAll": mustRegexFindAll, + "regexFind": regexFind, + "mustRegexFind": mustRegexFind, + "regexReplaceAll": regexReplaceAll, + "mustRegexReplaceAll": mustRegexReplaceAll, + "regexReplaceAllLiteral": regexReplaceAllLiteral, + "mustRegexReplaceAllLiteral": mustRegexReplaceAllLiteral, + "regexSplit": regexSplit, + "mustRegexSplit": mustRegexSplit, + "regexQuoteMeta": regexQuoteMeta, + + // URLs: + "urlParse": urlParse, + "urlJoin": urlJoin, +} diff --git a/vendor/github.com/Masterminds/sprig/v3/list.go b/vendor/github.com/Masterminds/sprig/v3/list.go new file mode 100644 index 0000000000..ca0fbb7893 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/list.go @@ -0,0 +1,464 @@ +package sprig + +import ( + "fmt" + "math" + "reflect" + "sort" +) + +// Reflection is used in these functions so that slices and arrays of strings, +// ints, and other types not implementing []interface{} can be worked with. +// For example, this is useful if you need to work on the output of regexs. 
+ +func list(v ...interface{}) []interface{} { + return v +} + +func push(list interface{}, v interface{}) []interface{} { + l, err := mustPush(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPush(list interface{}, v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append(nl, v), nil + + default: + return nil, fmt.Errorf("Cannot push on type %s", tp) + } +} + +func prepend(list interface{}, v interface{}) []interface{} { + l, err := mustPrepend(list, v) + if err != nil { + panic(err) + } + + return l +} + +func mustPrepend(list interface{}, v interface{}) ([]interface{}, error) { + //return append([]interface{}{v}, list...) + + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[i] = l2.Index(i).Interface() + } + + return append([]interface{}{v}, nl...), nil + + default: + return nil, fmt.Errorf("Cannot prepend on type %s", tp) + } +} + +func chunk(size int, list interface{}) [][]interface{} { + l, err := mustChunk(size, list) + if err != nil { + panic(err) + } + + return l +} + +func mustChunk(size int, list interface{}) ([][]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + + cs := int(math.Floor(float64(l-1)/float64(size)) + 1) + nl := make([][]interface{}, cs) + + for i := 0; i < cs; i++ { + clen := size + if i == cs-1 { + clen = int(math.Floor(math.Mod(float64(l), float64(size)))) + if clen == 0 { + clen = size + } + } + + nl[i] = make([]interface{}, clen) + + for j := 0; j < clen; j++ { + ix := i*size + j + nl[i][j] = l2.Index(ix).Interface() + } + } + + return nl, nil + + default: + return nil, fmt.Errorf("Cannot chunk type %s", tp) + } +} + +func last(list interface{}) interface{} { + l, err := mustLast(list) + if err != nil { + panic(err) + } + + return l +} + +func mustLast(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(l - 1).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find last on type %s", tp) + } +} + +func first(list interface{}) interface{} { + l, err := mustFirst(list) + if err != nil { + panic(err) + } + + return l +} + +func mustFirst(list interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + return l2.Index(0).Interface(), nil + default: + return nil, fmt.Errorf("Cannot find first on type %s", tp) + } +} + +func rest(list interface{}) []interface{} { + l, err := mustRest(list) + if err != nil { + panic(err) + } + + return l +} + +func mustRest(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 1; i < l; i++ { + nl[i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find 
rest on type %s", tp) + } +} + +func initial(list interface{}) []interface{} { + l, err := mustInitial(list) + if err != nil { + panic(err) + } + + return l +} + +func mustInitial(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + nl := make([]interface{}, l-1) + for i := 0; i < l-1; i++ { + nl[i] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find initial on type %s", tp) + } +} + +func sortAlpha(list interface{}) []string { + k := reflect.Indirect(reflect.ValueOf(list)).Kind() + switch k { + case reflect.Slice, reflect.Array: + a := strslice(list) + s := sort.StringSlice(a) + s.Sort() + return s + } + return []string{strval(list)} +} + +func reverse(v interface{}) []interface{} { + l, err := mustReverse(v) + if err != nil { + panic(err) + } + + return l +} + +func mustReverse(v interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(v).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(v) + + l := l2.Len() + // We do not sort in place because the incoming array should not be altered. + nl := make([]interface{}, l) + for i := 0; i < l; i++ { + nl[l-i-1] = l2.Index(i).Interface() + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot find reverse on type %s", tp) + } +} + +func compact(list interface{}) []interface{} { + l, err := mustCompact(list) + if err != nil { + panic(err) + } + + return l +} + +func mustCompact(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + nl := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !empty(item) { + nl = append(nl, item) + } + } + + return nl, nil + default: + return nil, fmt.Errorf("Cannot compact on type %s", tp) + } +} + +func uniq(list interface{}) []interface{} { + l, err := mustUniq(list) + if err != nil { + panic(err) + } + + return l +} + +func mustUniq(list interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + dest := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(dest, item) { + dest = append(dest, item) + } + } + + return dest, nil + default: + return nil, fmt.Errorf("Cannot find uniq on type %s", tp) + } +} + +func inList(haystack []interface{}, needle interface{}) bool { + for _, h := range haystack { + if reflect.DeepEqual(needle, h) { + return true + } + } + return false +} + +func without(list interface{}, omit ...interface{}) []interface{} { + l, err := mustWithout(list, omit...) 
+ if err != nil { + panic(err) + } + + return l +} + +func mustWithout(list interface{}, omit ...interface{}) ([]interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + res := []interface{}{} + var item interface{} + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if !inList(omit, item) { + res = append(res, item) + } + } + + return res, nil + default: + return nil, fmt.Errorf("Cannot find without on type %s", tp) + } +} + +func has(needle interface{}, haystack interface{}) bool { + l, err := mustHas(needle, haystack) + if err != nil { + panic(err) + } + + return l +} + +func mustHas(needle interface{}, haystack interface{}) (bool, error) { + if haystack == nil { + return false, nil + } + tp := reflect.TypeOf(haystack).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(haystack) + var item interface{} + l := l2.Len() + for i := 0; i < l; i++ { + item = l2.Index(i).Interface() + if reflect.DeepEqual(needle, item) { + return true, nil + } + } + + return false, nil + default: + return false, fmt.Errorf("Cannot find has on type %s", tp) + } +} + +// $list := [1, 2, 3, 4, 5] +// slice $list -> list[0:5] = list[:] +// slice $list 0 3 -> list[0:3] = list[:3] +// slice $list 3 5 -> list[3:5] +// slice $list 3 -> list[3:5] = list[3:] +func slice(list interface{}, indices ...interface{}) interface{} { + l, err := mustSlice(list, indices...) + if err != nil { + panic(err) + } + + return l +} + +func mustSlice(list interface{}, indices ...interface{}) (interface{}, error) { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + + l := l2.Len() + if l == 0 { + return nil, nil + } + + var start, end int + if len(indices) > 0 { + start = toInt(indices[0]) + } + if len(indices) < 2 { + end = l + } else { + end = toInt(indices[1]) + } + + return l2.Slice(start, end).Interface(), nil + default: + return nil, fmt.Errorf("list should be type of slice or array but %s", tp) + } +} + +func concat(lists ...interface{}) interface{} { + var res []interface{} + for _, list := range lists { + tp := reflect.TypeOf(list).Kind() + switch tp { + case reflect.Slice, reflect.Array: + l2 := reflect.ValueOf(list) + for i := 0; i < l2.Len(); i++ { + res = append(res, l2.Index(i).Interface()) + } + default: + panic(fmt.Sprintf("Cannot concat type %s as list", tp)) + } + } + return res +} diff --git a/vendor/github.com/Masterminds/sprig/v3/network.go b/vendor/github.com/Masterminds/sprig/v3/network.go new file mode 100644 index 0000000000..108d78a946 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/network.go @@ -0,0 +1,12 @@ +package sprig + +import ( + "math/rand" + "net" +) + +func getHostByName(name string) string { + addrs, _ := net.LookupHost(name) + //TODO: add error handing when release v3 comes out + return addrs[rand.Intn(len(addrs))] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/numeric.go b/vendor/github.com/Masterminds/sprig/v3/numeric.go new file mode 100644 index 0000000000..f68e4182ee --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/numeric.go @@ -0,0 +1,186 @@ +package sprig + +import ( + "fmt" + "math" + "strconv" + "strings" + + "github.com/spf13/cast" + "github.com/shopspring/decimal" +) + +// toFloat64 converts 64-bit floats +func toFloat64(v interface{}) float64 { + return cast.ToFloat64(v) +} + +func toInt(v 
interface{}) int { + return cast.ToInt(v) +} + +// toInt64 converts integer types to 64-bit integers +func toInt64(v interface{}) int64 { + return cast.ToInt64(v) +} + +func max(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb > aa { + aa = bb + } + } + return aa +} + +func maxf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Max(aa, bb) + } + return aa +} + +func min(a interface{}, i ...interface{}) int64 { + aa := toInt64(a) + for _, b := range i { + bb := toInt64(b) + if bb < aa { + aa = bb + } + } + return aa +} + +func minf(a interface{}, i ...interface{}) float64 { + aa := toFloat64(a) + for _, b := range i { + bb := toFloat64(b) + aa = math.Min(aa, bb) + } + return aa +} + +func until(count int) []int { + step := 1 + if count < 0 { + step = -1 + } + return untilStep(0, count, step) +} + +func untilStep(start, stop, step int) []int { + v := []int{} + + if stop < start { + if step >= 0 { + return v + } + for i := start; i > stop; i += step { + v = append(v, i) + } + return v + } + + if step <= 0 { + return v + } + for i := start; i < stop; i += step { + v = append(v, i) + } + return v +} + +func floor(a interface{}) float64 { + aa := toFloat64(a) + return math.Floor(aa) +} + +func ceil(a interface{}) float64 { + aa := toFloat64(a) + return math.Ceil(aa) +} + +func round(a interface{}, p int, rOpt ...float64) float64 { + roundOn := .5 + if len(rOpt) > 0 { + roundOn = rOpt[0] + } + val := toFloat64(a) + places := toFloat64(p) + + var round float64 + pow := math.Pow(10, places) + digit := pow * val + _, div := math.Modf(digit) + if div >= roundOn { + round = math.Ceil(digit) + } else { + round = math.Floor(digit) + } + return round / pow +} + +// toDecimal converts a unix octal value to decimal +func toDecimal(v interface{}) int64 { + result, err := strconv.ParseInt(fmt.Sprint(v), 8, 64) + if err != nil { + return 0 + } + return result +} + +func seq(params ...int) string { + increment := 1 + switch len(params) { + case 0: + return "" + case 1: + start := 1 + end := params[0] + if end < start { + increment = -1 + } + return intArrayToString(untilStep(start, end+increment, increment), " ") + case 3: + start := params[0] + end := params[2] + step := params[1] + if end < start { + increment = -1 + if step > 0 { + return "" + } + } + return intArrayToString(untilStep(start, end+increment, step), " ") + case 2: + start := params[0] + end := params[1] + step := 1 + if end < start { + step = -1 + } + return intArrayToString(untilStep(start, end+step, step), " ") + default: + return "" + } +} + +func intArrayToString(slice []int, delimiter string) string { + return strings.Trim(strings.Join(strings.Fields(fmt.Sprint(slice)), delimiter), "[]") +} + +// performs a float and subsequent decimal.Decimal conversion on inputs, +// and iterates through a and b executing the mathematical operation f +func execDecimalOp(a interface{}, b []interface{}, f func(d1, d2 decimal.Decimal) decimal.Decimal) float64 { + prt := decimal.NewFromFloat(toFloat64(a)) + for _, x := range b { + dx := decimal.NewFromFloat(toFloat64(x)) + prt = f(prt, dx) + } + rslt, _ := prt.Float64() + return rslt +} diff --git a/vendor/github.com/Masterminds/sprig/v3/reflect.go b/vendor/github.com/Masterminds/sprig/v3/reflect.go new file mode 100644 index 0000000000..8a65c132f0 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/reflect.go @@ -0,0 +1,28 @@ +package sprig + +import ( + "fmt" + 
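The numeric helpers in this numeric.go hunk map one-to-one onto sprig template functions of the same names. A small sketch of their semantics, hedged on the same sprig/v3 module-path and FuncMap assumptions as the earlier list example:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3" // assumed upstream module path
)

func main() {
	// untilStep(3, 10, 2) -> [3 5 7 9]; round half-up at the default .5
	// threshold -> 123.556; seq renders a shell-style sequence string.
	// prints: [3 5 7 9] 123.556 0 2 4 6 8 10
	const tpl = `{{ untilStep 3 10 2 }} {{ round 123.5555 3 }} {{ seq 0 2 10 }}`
	t := template.Must(template.New("nums").Funcs(sprig.TxtFuncMap()).Parse(tpl))
	_ = t.Execute(os.Stdout, nil)
}
```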
"reflect" +) + +// typeIs returns true if the src is the type named in target. +func typeIs(target string, src interface{}) bool { + return target == typeOf(src) +} + +func typeIsLike(target string, src interface{}) bool { + t := typeOf(src) + return target == t || "*"+target == t +} + +func typeOf(src interface{}) string { + return fmt.Sprintf("%T", src) +} + +func kindIs(target string, src interface{}) bool { + return target == kindOf(src) +} + +func kindOf(src interface{}) string { + return reflect.ValueOf(src).Kind().String() +} diff --git a/vendor/github.com/Masterminds/sprig/v3/regex.go b/vendor/github.com/Masterminds/sprig/v3/regex.go new file mode 100644 index 0000000000..fab5510189 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/regex.go @@ -0,0 +1,83 @@ +package sprig + +import ( + "regexp" +) + +func regexMatch(regex string, s string) bool { + match, _ := regexp.MatchString(regex, s) + return match +} + +func mustRegexMatch(regex string, s string) (bool, error) { + return regexp.MatchString(regex, s) +} + +func regexFindAll(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.FindAllString(s, n) +} + +func mustRegexFindAll(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.FindAllString(s, n), nil +} + +func regexFind(regex string, s string) string { + r := regexp.MustCompile(regex) + return r.FindString(s) +} + +func mustRegexFind(regex string, s string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.FindString(s), nil +} + +func regexReplaceAll(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllString(s, repl) +} + +func mustRegexReplaceAll(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllString(s, repl), nil +} + +func regexReplaceAllLiteral(regex string, s string, repl string) string { + r := regexp.MustCompile(regex) + return r.ReplaceAllLiteralString(s, repl) +} + +func mustRegexReplaceAllLiteral(regex string, s string, repl string) (string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return "", err + } + return r.ReplaceAllLiteralString(s, repl), nil +} + +func regexSplit(regex string, s string, n int) []string { + r := regexp.MustCompile(regex) + return r.Split(s, n) +} + +func mustRegexSplit(regex string, s string, n int) ([]string, error) { + r, err := regexp.Compile(regex) + if err != nil { + return []string{}, err + } + return r.Split(s, n), nil +} + +func regexQuoteMeta(s string) string { + return regexp.QuoteMeta(s) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/semver.go b/vendor/github.com/Masterminds/sprig/v3/semver.go new file mode 100644 index 0000000000..3fbe08aa63 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/semver.go @@ -0,0 +1,23 @@ +package sprig + +import ( + sv2 "github.com/Masterminds/semver/v3" +) + +func semverCompare(constraint, version string) (bool, error) { + c, err := sv2.NewConstraint(constraint) + if err != nil { + return false, err + } + + v, err := sv2.NewVersion(version) + if err != nil { + return false, err + } + + return c.Check(v), nil +} + +func semver(version string) (*sv2.Version, error) { + return sv2.NewVersion(version) +} diff --git a/vendor/github.com/Masterminds/sprig/v3/strings.go 
b/vendor/github.com/Masterminds/sprig/v3/strings.go new file mode 100644 index 0000000000..e0ae628c84 --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/strings.go @@ -0,0 +1,236 @@ +package sprig + +import ( + "encoding/base32" + "encoding/base64" + "fmt" + "reflect" + "strconv" + "strings" + + util "github.com/Masterminds/goutils" +) + +func base64encode(v string) string { + return base64.StdEncoding.EncodeToString([]byte(v)) +} + +func base64decode(v string) string { + data, err := base64.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func base32encode(v string) string { + return base32.StdEncoding.EncodeToString([]byte(v)) +} + +func base32decode(v string) string { + data, err := base32.StdEncoding.DecodeString(v) + if err != nil { + return err.Error() + } + return string(data) +} + +func abbrev(width int, s string) string { + if width < 4 { + return s + } + r, _ := util.Abbreviate(s, width) + return r +} + +func abbrevboth(left, right int, s string) string { + if right < 4 || left > 0 && right < 7 { + return s + } + r, _ := util.AbbreviateFull(s, left, right) + return r +} +func initials(s string) string { + // Wrap this just to eliminate the var args, which templates don't do well. + return util.Initials(s) +} + +func randAlphaNumeric(count int) string { + // It is not possible, it appears, to actually generate an error here. + r, _ := util.CryptoRandomAlphaNumeric(count) + return r +} + +func randAlpha(count int) string { + r, _ := util.CryptoRandomAlphabetic(count) + return r +} + +func randAscii(count int) string { + r, _ := util.CryptoRandomAscii(count) + return r +} + +func randNumeric(count int) string { + r, _ := util.CryptoRandomNumeric(count) + return r +} + +func untitle(str string) string { + return util.Uncapitalize(str) +} + +func quote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("%q", strval(s))) + } + } + return strings.Join(out, " ") +} + +func squote(str ...interface{}) string { + out := make([]string, 0, len(str)) + for _, s := range str { + if s != nil { + out = append(out, fmt.Sprintf("'%v'", s)) + } + } + return strings.Join(out, " ") +} + +func cat(v ...interface{}) string { + v = removeNilElements(v) + r := strings.TrimSpace(strings.Repeat("%v ", len(v))) + return fmt.Sprintf(r, v...) 
+} + +func indent(spaces int, v string) string { + pad := strings.Repeat(" ", spaces) + return pad + strings.Replace(v, "\n", "\n"+pad, -1) +} + +func nindent(spaces int, v string) string { + return "\n" + indent(spaces, v) +} + +func replace(old, new, src string) string { + return strings.Replace(src, old, new, -1) +} + +func plural(one, many string, count int) string { + if count == 1 { + return one + } + return many +} + +func strslice(v interface{}) []string { + switch v := v.(type) { + case []string: + return v + case []interface{}: + b := make([]string, 0, len(v)) + for _, s := range v { + if s != nil { + b = append(b, strval(s)) + } + } + return b + default: + val := reflect.ValueOf(v) + switch val.Kind() { + case reflect.Array, reflect.Slice: + l := val.Len() + b := make([]string, 0, l) + for i := 0; i < l; i++ { + value := val.Index(i).Interface() + if value != nil { + b = append(b, strval(value)) + } + } + return b + default: + if v == nil { + return []string{} + } + + return []string{strval(v)} + } + } +} + +func removeNilElements(v []interface{}) []interface{} { + newSlice := make([]interface{}, 0, len(v)) + for _, i := range v { + if i != nil { + newSlice = append(newSlice, i) + } + } + return newSlice +} + +func strval(v interface{}) string { + switch v := v.(type) { + case string: + return v + case []byte: + return string(v) + case error: + return v.Error() + case fmt.Stringer: + return v.String() + default: + return fmt.Sprintf("%v", v) + } +} + +func trunc(c int, s string) string { + if c < 0 && len(s)+c > 0 { + return s[len(s)+c:] + } + if c >= 0 && len(s) > c { + return s[:c] + } + return s +} + +func join(sep string, v interface{}) string { + return strings.Join(strslice(v), sep) +} + +func split(sep, orig string) map[string]string { + parts := strings.Split(orig, sep) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +func splitn(sep string, n int, orig string) map[string]string { + parts := strings.SplitN(orig, sep, n) + res := make(map[string]string, len(parts)) + for i, v := range parts { + res["_"+strconv.Itoa(i)] = v + } + return res +} + +// substring creates a substring of the given string. +// +// If start is < 0, this calls string[:end]. +// +// If start is >= 0 and end < 0 or end is bigger than the length of s, this calls string[start:] +// +// Otherwise, this calls string[start:end]. 
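Two details of the string helpers above are easy to miss: split/splitn return maps keyed "_0", "_1", ... so individual fields are addressable from templates, and a negative trunc count keeps the suffix rather than the prefix. A sketch under the same sprig/v3 assumptions as before:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3" // assumed upstream module path
)

func main() {
	// prints: hello world 0
	const tpl = `{{ trunc 5 "hello world" }} {{ trunc -5 "hello world" }} {{ (split "." "10.0.0.1")._1 }}`
	t := template.Must(template.New("str").Funcs(sprig.TxtFuncMap()).Parse(tpl))
	_ = t.Execute(os.Stdout, nil)
}
```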
+func substring(start, end int, s string) string { + if start < 0 { + return s[:end] + } + if end < 0 || end > len(s) { + return s[start:] + } + return s[start:end] +} diff --git a/vendor/github.com/Masterminds/sprig/v3/url.go b/vendor/github.com/Masterminds/sprig/v3/url.go new file mode 100644 index 0000000000..b8e120e19b --- /dev/null +++ b/vendor/github.com/Masterminds/sprig/v3/url.go @@ -0,0 +1,66 @@ +package sprig + +import ( + "fmt" + "net/url" + "reflect" +) + +func dictGetOrEmpty(dict map[string]interface{}, key string) string { + value, ok := dict[key] + if !ok { + return "" + } + tp := reflect.TypeOf(value).Kind() + if tp != reflect.String { + panic(fmt.Sprintf("unable to parse %s key, must be of type string, but %s found", key, tp.String())) + } + return reflect.ValueOf(value).String() +} + +// parses given URL to return dict object +func urlParse(v string) map[string]interface{} { + dict := map[string]interface{}{} + parsedURL, err := url.Parse(v) + if err != nil { + panic(fmt.Sprintf("unable to parse url: %s", err)) + } + dict["scheme"] = parsedURL.Scheme + dict["host"] = parsedURL.Host + dict["hostname"] = parsedURL.Hostname() + dict["path"] = parsedURL.Path + dict["query"] = parsedURL.RawQuery + dict["opaque"] = parsedURL.Opaque + dict["fragment"] = parsedURL.Fragment + if parsedURL.User != nil { + dict["userinfo"] = parsedURL.User.String() + } else { + dict["userinfo"] = "" + } + + return dict +} + +// join given dict to URL string +func urlJoin(d map[string]interface{}) string { + resURL := url.URL{ + Scheme: dictGetOrEmpty(d, "scheme"), + Host: dictGetOrEmpty(d, "host"), + Path: dictGetOrEmpty(d, "path"), + RawQuery: dictGetOrEmpty(d, "query"), + Opaque: dictGetOrEmpty(d, "opaque"), + Fragment: dictGetOrEmpty(d, "fragment"), + } + userinfo := dictGetOrEmpty(d, "userinfo") + var user *url.Userinfo + if userinfo != "" { + tempURL, err := url.Parse(fmt.Sprintf("proto://%s@host", userinfo)) + if err != nil { + panic(fmt.Sprintf("unable to parse userinfo in dict: %s", err)) + } + user = tempURL.User + } + + resURL.User = user + return resURL.String() +} diff --git a/vendor/github.com/Masterminds/squirrel/.gitignore b/vendor/github.com/Masterminds/squirrel/.gitignore new file mode 100644 index 0000000000..4a0699f0b7 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/.gitignore @@ -0,0 +1 @@ +squirrel.test \ No newline at end of file diff --git a/vendor/github.com/Masterminds/squirrel/.travis.yml b/vendor/github.com/Masterminds/squirrel/.travis.yml new file mode 100644 index 0000000000..7bb6da4878 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/.travis.yml @@ -0,0 +1,30 @@ +language: go + +go: + - 1.11.x + - 1.12.x + - 1.13.x + +services: + - mysql + - postgresql + +# Setting sudo access to false will let Travis CI use containers rather than +# VMs to run the tests. 
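In the url.go hunk above, urlParse explodes a URL into exactly the dict shape that urlJoin consumes, so the two round-trip. A sketch under the same sprig/v3 assumptions; dict is sprig's map constructor, defined elsewhere in the package:

```go
package main

import (
	"os"
	"text/template"

	"github.com/Masterminds/sprig/v3" // assumed upstream module path
)

func main() {
	// prints: example.com https://example.com/search
	const tpl = `{{ (urlParse "https://example.com/search?q=sprig").host }} ` +
		`{{ urlJoin (dict "scheme" "https" "host" "example.com" "path" "/search") }}`
	t := template.Must(template.New("url").Funcs(sprig.TxtFuncMap()).Parse(tpl))
	_ = t.Execute(os.Stdout, nil)
}
```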
For more details see: +# - http://docs.travis-ci.com/user/workers/container-based-infrastructure/ +# - http://docs.travis-ci.com/user/workers/standard-infrastructure/ +sudo: false + +before_script: + - mysql -e 'CREATE DATABASE squirrel;' + - psql -c 'CREATE DATABASE squirrel;' -U postgres + +script: + - go test + - cd integration + - go test -args -driver sqlite3 + - go test -args -driver mysql -dataSource travis@/squirrel + - go test -args -driver postgres -dataSource 'postgres://postgres@localhost/squirrel?sslmode=disable' + +notifications: + irc: "irc.freenode.net#masterminds" diff --git a/vendor/github.com/Masterminds/squirrel/LICENSE.txt b/vendor/github.com/Masterminds/squirrel/LICENSE.txt new file mode 100644 index 0000000000..74c20a2b97 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/LICENSE.txt @@ -0,0 +1,23 @@ +Squirrel +The Masterminds +Copyright (C) 2014-2015, Lann Martin +Copyright (C) 2015-2016, Google +Copyright (C) 2015, Matt Farina and Matt Butcher + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/Masterminds/squirrel/README.md b/vendor/github.com/Masterminds/squirrel/README.md new file mode 100644 index 0000000000..1d37f282f1 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/README.md @@ -0,0 +1,142 @@ +[![Stability: Maintenance](https://masterminds.github.io/stability/maintenance.svg)](https://masterminds.github.io/stability/maintenance.html) +### Squirrel is "complete". +Bug fixes will still be merged (slowly). Bug reports are welcome, but I will not necessarily respond to them. If another fork (or substantially similar project) actively improves on what Squirrel does, let me know and I may link to it here. 
+ + +# Squirrel - fluent SQL generator for Go + +```go +import "github.com/Masterminds/squirrel" +``` + + +[![GoDoc](https://godoc.org/github.com/Masterminds/squirrel?status.png)](https://godoc.org/github.com/Masterminds/squirrel) +[![Build Status](https://api.travis-ci.org/Masterminds/squirrel.svg?branch=master)](https://travis-ci.org/Masterminds/squirrel) + +**Squirrel is not an ORM.** For an application of Squirrel, check out +[structable, a table-struct mapper](https://github.com/Masterminds/structable) + + +Squirrel helps you build SQL queries from composable parts: + +```go +import sq "github.com/Masterminds/squirrel" + +users := sq.Select("*").From("users").Join("emails USING (email_id)") + +active := users.Where(sq.Eq{"deleted_at": nil}) + +sql, args, err := active.ToSql() + +sql == "SELECT * FROM users JOIN emails USING (email_id) WHERE deleted_at IS NULL" +``` + +```go +sql, args, err := sq. + Insert("users").Columns("name", "age"). + Values("moe", 13).Values("larry", sq.Expr("? + 5", 12)). + ToSql() + +sql == "INSERT INTO users (name,age) VALUES (?,?),(?,? + 5)" +``` + +Squirrel can also execute queries directly: + +```go +stooges := users.Where(sq.Eq{"username": []string{"moe", "larry", "curly", "shemp"}}) +three_stooges := stooges.Limit(3) +rows, err := three_stooges.RunWith(db).Query() + +// Behaves like: +rows, err := db.Query("SELECT * FROM users WHERE username IN (?,?,?,?) LIMIT 3", + "moe", "larry", "curly", "shemp") +``` + +Squirrel makes conditional query building a breeze: + +```go +if len(q) > 0 { + users = users.Where("name LIKE ?", fmt.Sprint("%", q, "%")) +} +``` + +Squirrel wants to make your life easier: + +```go +// StmtCache caches Prepared Stmts for you +dbCache := sq.NewStmtCache(db) + +// StatementBuilder keeps your syntax neat +mydb := sq.StatementBuilder.RunWith(dbCache) +select_users := mydb.Select("*").From("users") +``` + +Squirrel loves PostgreSQL: + +```go +psql := sq.StatementBuilder.PlaceholderFormat(sq.Dollar) + +// You use question marks for placeholders... +sql, _, _ := psql.Select("*").From("elephants").Where("name IN (?,?)", "Dumbo", "Verna").ToSql() + +/// ...squirrel replaces them using PlaceholderFormat. +sql == "SELECT * FROM elephants WHERE name IN ($1,$2)" + + +/// You can retrieve id ... +query := sq.Insert("nodes"). + Columns("uuid", "type", "data"). + Values(node.Uuid, node.Type, node.Data). + Suffix("RETURNING \"id\""). + RunWith(m.db). + PlaceholderFormat(sq.Dollar) + +query.QueryRow().Scan(&node.id) +``` + +You can escape question marks by inserting two question marks: + +```sql +SELECT * FROM nodes WHERE meta->'format' ??| array[?,?] +``` + +will generate with the Dollar Placeholder: + +```sql +SELECT * FROM nodes WHERE meta->'format' ?| array[$1,$2] +``` + +## FAQ + +* **How can I build an IN query on composite keys / tuples, e.g. `WHERE (col1, col2) IN ((1,2),(3,4))`? ([#104](https://github.com/Masterminds/squirrel/issues/104))** + + Squirrel does not explicitly support tuples, but you can get the same effect with e.g.: + + ```go + sq.Or{ + sq.Eq{"col1": 1, "col2": 2}, + sq.Eq{"col1": 3, "col2": 4}} + ``` + + ```sql + WHERE (col1 = 1 AND col2 = 2) OR (col1 = 3 AND col2 = 4) + ``` + + (which should produce the same query plan as the tuple version) + +* **Why doesn't `Eq{"mynumber": []uint8{1,2,3}}` turn into an `IN` query? ([#114](https://github.com/Masterminds/squirrel/issues/114))** + + Values of type `[]byte` are handled specially by `database/sql`. 
In Go, [`byte` is just an alias of `uint8`](https://golang.org/pkg/builtin/#byte), so there is no way to distinguish `[]uint8` from `[]byte`. + +* **Some features are poorly documented!** + + This isn't a frequent complaints section! + +* **Some features are poorly documented?** + + Yes. The tests should be considered a part of the documentation; take a look at those for ideas on how to express more complex queries. + +## License + +Squirrel is released under the +[MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/Masterminds/squirrel/case.go b/vendor/github.com/Masterminds/squirrel/case.go new file mode 100644 index 0000000000..299e14b9d4 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/case.go @@ -0,0 +1,128 @@ +package squirrel + +import ( + "bytes" + "errors" + + "github.com/lann/builder" +) + +func init() { + builder.Register(CaseBuilder{}, caseData{}) +} + +// sqlizerBuffer is a helper that allows writing many Sqlizers one by one +// without constant checks for errors that may come from a Sqlizer +type sqlizerBuffer struct { + bytes.Buffer + args []interface{} + err error +} + +// WriteSql converts a Sqlizer to SQL and writes it to the buffer +func (b *sqlizerBuffer) WriteSql(item Sqlizer) { + if b.err != nil { + return + } + + var str string + var args []interface{} + str, args, b.err = nestedToSql(item) + + if b.err != nil { + return + } + + b.WriteString(str) + b.WriteByte(' ') + b.args = append(b.args, args...) +} + +func (b *sqlizerBuffer) ToSql() (string, []interface{}, error) { + return b.String(), b.args, b.err +} + +// whenPart is a helper structure to describe SQL's "WHEN ... THEN ..." expression +type whenPart struct { + when Sqlizer + then Sqlizer +} + +func newWhenPart(when interface{}, then interface{}) whenPart { + return whenPart{newPart(when), newPart(then)} +} + +// caseData holds all the data required to build a CASE SQL construct +type caseData struct { + What Sqlizer + WhenParts []whenPart + Else Sqlizer +} + +// ToSql implements Sqlizer +func (d *caseData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.WhenParts) == 0 { + err = errors.New("case expression must contain at least one WHEN clause") + + return + } + + sql := sqlizerBuffer{} + + sql.WriteString("CASE ") + if d.What != nil { + sql.WriteSql(d.What) + } + + for _, p := range d.WhenParts { + sql.WriteString("WHEN ") + sql.WriteSql(p.when) + sql.WriteString("THEN ") + sql.WriteSql(p.then) + } + + if d.Else != nil { + sql.WriteString("ELSE ") + sql.WriteSql(d.Else) + } + + sql.WriteString("END") + + return sql.ToSql() +} + +// CaseBuilder builds a SQL CASE construct which can be used as part of a query. +type CaseBuilder builder.Builder + +// ToSql builds the query into a SQL string and bound args. +func (b CaseBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(caseData) + return data.ToSql() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. +func (b CaseBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// what sets optional value for CASE construct "CASE [value] ..." +func (b CaseBuilder) what(expr interface{}) CaseBuilder { + return builder.Set(b, "What", newPart(expr)).(CaseBuilder) +} + +// When adds "WHEN ... THEN ..." 
part to CASE construct +func (b CaseBuilder) When(when interface{}, then interface{}) CaseBuilder { + // TODO: performance hint: replace slice of WhenPart with just slice of parts + // where even indices of the slice belong to "when"s and odd indices belong to "then"s + return builder.Append(b, "WhenParts", newWhenPart(when, then)).(CaseBuilder) +} + +// Else sets the optional "ELSE ..." part for the CASE construct +func (b CaseBuilder) Else(expr interface{}) CaseBuilder { + return builder.Set(b, "Else", newPart(expr)).(CaseBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/delete.go b/vendor/github.com/Masterminds/squirrel/delete.go new file mode 100644 index 0000000000..f3f31e63ef --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/delete.go @@ -0,0 +1,191 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +type deleteData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes []Sqlizer + From string + WhereParts []Sqlizer + OrderBys []string + Limit string + Offset string + Suffixes []Sqlizer +} + +func (d *deleteData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *deleteData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.From) == 0 { + err = fmt.Errorf("delete statements must specify a From table") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, err = appendToSql(d.Prefixes, sql, " ", args) + if err != nil { + return + } + + sql.WriteString(" ") + } + + sql.WriteString("DELETE FROM ") + sql.WriteString(d.From) + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderBys) > 0 { + sql.WriteString(" ORDER BY ") + sql.WriteString(strings.Join(d.OrderBys, ", ")) + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Suffixes, sql, " ", args) + if err != nil { + return + } + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +// Builder + +// DeleteBuilder builds SQL DELETE statements. +type DeleteBuilder builder.Builder + +func init() { + builder.Register(DeleteBuilder{}, deleteData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b DeleteBuilder) PlaceholderFormat(f PlaceholderFormat) DeleteBuilder { + return builder.Set(b, "PlaceholderFormat", f).(DeleteBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b DeleteBuilder) RunWith(runner BaseRunner) DeleteBuilder { + return setRunWith(b, runner).(DeleteBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b DeleteBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(deleteData) + return data.Exec() +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b DeleteBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(deleteData) + return data.ToSql() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. 
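CaseBuilder composes with the other builders through Alias. A hedged sketch of the generated SQL, assuming the upstream module path github.com/Masterminds/squirrel and the package's exported Case constructor (not visible in this hunk):

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel" // assumed upstream module path
)

func main() {
	// String arguments to When/Else are spliced in as SQL literals via newPart.
	caseStmt := sq.Case("status").When("'active'", "1").Else("0")

	sql, _, _ := sq.Select("id").
		Column(sq.Alias(caseStmt, "is_active")).
		From("users").
		ToSql()
	fmt.Println(sql)
	// SELECT id, (CASE status WHEN 'active' THEN 1 ELSE 0 END) AS is_active FROM users
}
```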
+func (b DeleteBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// Prefix adds an expression to the beginning of the query +func (b DeleteBuilder) Prefix(sql string, args ...interface{}) DeleteBuilder { + return b.PrefixExpr(Expr(sql, args...)) +} + +// PrefixExpr adds an expression to the very beginning of the query +func (b DeleteBuilder) PrefixExpr(expr Sqlizer) DeleteBuilder { + return builder.Append(b, "Prefixes", expr).(DeleteBuilder) +} + +// From sets the table to be deleted from. +func (b DeleteBuilder) From(from string) DeleteBuilder { + return builder.Set(b, "From", from).(DeleteBuilder) +} + +// Where adds WHERE expressions to the query. +// +// See SelectBuilder.Where for more information. +func (b DeleteBuilder) Where(pred interface{}, args ...interface{}) DeleteBuilder { + return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(DeleteBuilder) +} + +// OrderBy adds ORDER BY expressions to the query. +func (b DeleteBuilder) OrderBy(orderBys ...string) DeleteBuilder { + return builder.Extend(b, "OrderBys", orderBys).(DeleteBuilder) +} + +// Limit sets a LIMIT clause on the query. +func (b DeleteBuilder) Limit(limit uint64) DeleteBuilder { + return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(DeleteBuilder) +} + +// Offset sets an OFFSET clause on the query. +func (b DeleteBuilder) Offset(offset uint64) DeleteBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(DeleteBuilder) +} + +// Suffix adds an expression to the end of the query +func (b DeleteBuilder) Suffix(sql string, args ...interface{}) DeleteBuilder { + return b.SuffixExpr(Expr(sql, args...)) +} + +// SuffixExpr adds an expression to the end of the query +func (b DeleteBuilder) SuffixExpr(expr Sqlizer) DeleteBuilder { + return builder.Append(b, "Suffixes", expr).(DeleteBuilder) +} + +func (b DeleteBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(deleteData) + return data.Query() +} + +func (d *deleteData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} diff --git a/vendor/github.com/Masterminds/squirrel/delete_ctx.go b/vendor/github.com/Masterminds/squirrel/delete_ctx.go new file mode 100644 index 0000000000..de83c55df3 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/delete_ctx.go @@ -0,0 +1,69 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" + + "github.com/lann/builder" +) + +func (d *deleteData) ExecContext(ctx context.Context) (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(ExecerContext) + if !ok { + return nil, NoContextSupport + } + return ExecContextWith(ctx, ctxRunner, d) +} + +func (d *deleteData) QueryContext(ctx context.Context) (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(QueryerContext) + if !ok { + return nil, NoContextSupport + } + return QueryContextWith(ctx, ctxRunner, d) +} + +func (d *deleteData) QueryRowContext(ctx context.Context) RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRowerContext) + if !ok { + if _, ok := d.RunWith.(QueryerContext); !ok { + return &Row{err: RunnerNotQueryRunner} + } + return &Row{err: NoContextSupport} + } + return QueryRowContextWith(ctx, queryRower, d) +} + +// ExecContext builds and ExecContexts the 
query with the Runner set by RunWith. +func (b DeleteBuilder) ExecContext(ctx context.Context) (sql.Result, error) { + data := builder.GetStruct(b).(deleteData) + return data.ExecContext(ctx) +} + +// QueryContext builds and QueryContexts the query with the Runner set by RunWith. +func (b DeleteBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(deleteData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b DeleteBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(deleteData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b DeleteBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/expr.go b/vendor/github.com/Masterminds/squirrel/expr.go new file mode 100644 index 0000000000..eba1b4579e --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/expr.go @@ -0,0 +1,419 @@ +package squirrel + +import ( + "bytes" + "database/sql/driver" + "fmt" + "reflect" + "sort" + "strings" +) + +const ( + // Portable true/false literals. + sqlTrue = "(1=1)" + sqlFalse = "(1=0)" +) + +type expr struct { + sql string + args []interface{} +} + +// Expr builds an expression from a SQL fragment and arguments. +// +// Ex: +// Expr("FROM_UNIXTIME(?)", t) +func Expr(sql string, args ...interface{}) Sqlizer { + return expr{sql: sql, args: args} +} + +func (e expr) ToSql() (sql string, args []interface{}, err error) { + simple := true + for _, arg := range e.args { + if _, ok := arg.(Sqlizer); ok { + simple = false + } + } + if simple { + return e.sql, e.args, nil + } + + buf := &bytes.Buffer{} + ap := e.args + sp := e.sql + + var isql string + var iargs []interface{} + + for err == nil && len(ap) > 0 && len(sp) > 0 { + i := strings.Index(sp, "?") + if i < 0 { + // no more placeholders + break + } + if len(sp) > i+1 && sp[i+1:i+2] == "?" { + // escaped "??"; append it and step past + buf.WriteString(sp[:i+2]) + sp = sp[i+2:] + continue + } + + if as, ok := ap[0].(Sqlizer); ok { + // sqlizer argument; expand it and append the result + isql, iargs, err = as.ToSql() + buf.WriteString(sp[:i]) + buf.WriteString(isql) + args = append(args, iargs...) + } else { + // normal argument; append it and the placeholder + buf.WriteString(sp[:i+1]) + args = append(args, ap[0]) + } + + // step past the argument and placeholder + ap = ap[1:] + sp = sp[i+1:] + } + + // append the remaining sql and arguments + buf.WriteString(sp) + return buf.String(), append(args, ap...), err +} + +type concatExpr []interface{} + +func (ce concatExpr) ToSql() (sql string, args []interface{}, err error) { + for _, part := range ce { + switch p := part.(type) { + case string: + sql += p + case Sqlizer: + pSql, pArgs, err := p.ToSql() + if err != nil { + return "", nil, err + } + sql += pSql + args = append(args, pArgs...) + default: + return "", nil, fmt.Errorf("%#v is not a string or Sqlizer", part) + } + } + return +} + +// ConcatExpr builds an expression by concatenating strings and other expressions. 
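The non-"simple" path of expr.ToSql above is what makes Expr composable: a Sqlizer argument is spliced into the SQL in place of its ? while its own args are flattened into the outer arg list. A hedged sketch, again assuming the upstream module path:

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel" // assumed upstream module path
)

func main() {
	// The inner Expr replaces the outer ?; "Foo" is carried over as the bound arg.
	inner := sq.Expr("LOWER(?)", "Foo")
	sql, args, _ := sq.Select("*").From("t").Where(sq.Expr("name = ?", inner)).ToSql()
	fmt.Println(sql, args)
	// SELECT * FROM t WHERE name = LOWER(?) [Foo]
}
```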
+// +// Ex: +// name_expr := Expr("CONCAT(?, ' ', ?)", firstName, lastName) +// ConcatExpr("COALESCE(full_name,", name_expr, ")") +func ConcatExpr(parts ...interface{}) concatExpr { + return concatExpr(parts) +} + +// aliasExpr helps to alias part of SQL query generated with underlying "expr" +type aliasExpr struct { + expr Sqlizer + alias string +} + +// Alias allows to define alias for column in SelectBuilder. Useful when column is +// defined as complex expression like IF or CASE +// Ex: +// .Column(Alias(caseStmt, "case_column")) +func Alias(expr Sqlizer, alias string) aliasExpr { + return aliasExpr{expr, alias} +} + +func (e aliasExpr) ToSql() (sql string, args []interface{}, err error) { + sql, args, err = e.expr.ToSql() + if err == nil { + sql = fmt.Sprintf("(%s) AS %s", sql, e.alias) + } + return +} + +// Eq is syntactic sugar for use with Where/Having/Set methods. +type Eq map[string]interface{} + +func (eq Eq) toSQL(useNotOpr bool) (sql string, args []interface{}, err error) { + if len(eq) == 0 { + // Empty Sql{} evaluates to true. + sql = sqlTrue + return + } + + var ( + exprs []string + equalOpr = "=" + inOpr = "IN" + nullOpr = "IS" + inEmptyExpr = sqlFalse + ) + + if useNotOpr { + equalOpr = "<>" + inOpr = "NOT IN" + nullOpr = "IS NOT" + inEmptyExpr = sqlTrue + } + + sortedKeys := getSortedKeys(eq) + for _, key := range sortedKeys { + var expr string + val := eq[key] + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + r := reflect.ValueOf(val) + if r.Kind() == reflect.Ptr { + if r.IsNil() { + val = nil + } else { + val = r.Elem().Interface() + } + } + + if val == nil { + expr = fmt.Sprintf("%s %s NULL", key, nullOpr) + } else { + if isListType(val) { + valVal := reflect.ValueOf(val) + if valVal.Len() == 0 { + expr = inEmptyExpr + if args == nil { + args = []interface{}{} + } + } else { + for i := 0; i < valVal.Len(); i++ { + args = append(args, valVal.Index(i).Interface()) + } + expr = fmt.Sprintf("%s %s (%s)", key, inOpr, Placeholders(valVal.Len())) + } + } else { + expr = fmt.Sprintf("%s %s ?", key, equalOpr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (eq Eq) ToSql() (sql string, args []interface{}, err error) { + return eq.toSQL(false) +} + +// NotEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(NotEq{"id": 1}) == "id <> 1" +type NotEq Eq + +func (neq NotEq) ToSql() (sql string, args []interface{}, err error) { + return Eq(neq).toSQL(true) +} + +// Like is syntactic sugar for use with LIKE conditions. +// Ex: +// .Where(Like{"name": "%irrel"}) +type Like map[string]interface{} + +func (lk Like) toSql(opr string) (sql string, args []interface{}, err error) { + var exprs []string + for key, val := range lk { + expr := "" + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + err = fmt.Errorf("cannot use null with like operators") + return + } else { + if isListType(val) { + err = fmt.Errorf("cannot use array or slice with like operators") + return + } else { + expr = fmt.Sprintf("%s %s ?", key, opr) + args = append(args, val) + } + } + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (lk Like) ToSql() (sql string, args []interface{}, err error) { + return lk.toSql("LIKE") +} + +// NotLike is syntactic sugar for use with LIKE conditions. 
+// Ex: +// .Where(NotLike{"name": "%irrel"}) +type NotLike Like + +func (nlk NotLike) ToSql() (sql string, args []interface{}, err error) { + return Like(nlk).toSql("NOT LIKE") +} + +// ILike is syntactic sugar for use with ILIKE conditions. +// Ex: +// .Where(ILike{"name": "sq%"}) +type ILike Like + +func (ilk ILike) ToSql() (sql string, args []interface{}, err error) { + return Like(ilk).toSql("ILIKE") +} + +// NotILike is syntactic sugar for use with ILIKE conditions. +// Ex: +// .Where(NotILike{"name": "sq%"}) +type NotILike Like + +func (nilk NotILike) ToSql() (sql string, args []interface{}, err error) { + return Like(nilk).toSql("NOT ILIKE") +} + +// Lt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Lt{"id": 1}) +type Lt map[string]interface{} + +func (lt Lt) toSql(opposite, orEq bool) (sql string, args []interface{}, err error) { + var ( + exprs []string + opr = "<" + ) + + if opposite { + opr = ">" + } + + if orEq { + opr = fmt.Sprintf("%s%s", opr, "=") + } + + sortedKeys := getSortedKeys(lt) + for _, key := range sortedKeys { + var expr string + val := lt[key] + + switch v := val.(type) { + case driver.Valuer: + if val, err = v.Value(); err != nil { + return + } + } + + if val == nil { + err = fmt.Errorf("cannot use null with less than or greater than operators") + return + } + if isListType(val) { + err = fmt.Errorf("cannot use array or slice with less than or greater than operators") + return + } + expr = fmt.Sprintf("%s %s ?", key, opr) + args = append(args, val) + + exprs = append(exprs, expr) + } + sql = strings.Join(exprs, " AND ") + return +} + +func (lt Lt) ToSql() (sql string, args []interface{}, err error) { + return lt.toSql(false, false) +} + +// LtOrEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(LtOrEq{"id": 1}) == "id <= 1" +type LtOrEq Lt + +func (ltOrEq LtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(ltOrEq).toSql(false, true) +} + +// Gt is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(Gt{"id": 1}) == "id > 1" +type Gt Lt + +func (gt Gt) ToSql() (sql string, args []interface{}, err error) { + return Lt(gt).toSql(true, false) +} + +// GtOrEq is syntactic sugar for use with Where/Having/Set methods. +// Ex: +// .Where(GtOrEq{"id": 1}) == "id >= 1" +type GtOrEq Lt + +func (gtOrEq GtOrEq) ToSql() (sql string, args []interface{}, err error) { + return Lt(gtOrEq).toSql(true, true) +} + +type conj []Sqlizer + +func (c conj) join(sep, defaultExpr string) (sql string, args []interface{}, err error) { + if len(c) == 0 { + return defaultExpr, []interface{}{}, nil + } + var sqlParts []string + for _, sqlizer := range c { + partSQL, partArgs, err := nestedToSql(sqlizer) + if err != nil { + return "", nil, err + } + if partSQL != "" { + sqlParts = append(sqlParts, partSQL) + args = append(args, partArgs...) 
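The map-based sugar above encodes several SQL conventions at once: a slice value becomes an IN list, a nil becomes IS NULL (or IS NOT NULL under NotEq), and And/Or parenthesize their joined parts. A hedged sketch under the same module-path assumption:

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel" // assumed upstream module path
)

func main() {
	pred := sq.And{
		sq.Eq{"status": []string{"new", "open"}}, // slice value -> IN (?,?)
		sq.NotEq{"deleted_at": nil},              // nil -> IS NOT NULL
		sq.GtOrEq{"age": 21},                     // -> age >= ?
	}
	sql, args, _ := pred.ToSql()
	fmt.Println(sql, args)
	// (status IN (?,?) AND deleted_at IS NOT NULL AND age >= ?) [new open 21]
}
```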
+ } + } + if len(sqlParts) > 0 { + sql = fmt.Sprintf("(%s)", strings.Join(sqlParts, sep)) + } + return +} + +// And conjunction Sqlizers +type And conj + +func (a And) ToSql() (string, []interface{}, error) { + return conj(a).join(" AND ", sqlTrue) +} + +// Or conjunction Sqlizers +type Or conj + +func (o Or) ToSql() (string, []interface{}, error) { + return conj(o).join(" OR ", sqlFalse) +} + +func getSortedKeys(exp map[string]interface{}) []string { + sortedKeys := make([]string, 0, len(exp)) + for k := range exp { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + return sortedKeys +} + +func isListType(val interface{}) bool { + if driver.IsValue(val) { + return false + } + valVal := reflect.ValueOf(val) + return valVal.Kind() == reflect.Array || valVal.Kind() == reflect.Slice +} diff --git a/vendor/github.com/Masterminds/squirrel/insert.go b/vendor/github.com/Masterminds/squirrel/insert.go new file mode 100644 index 0000000000..c23a57935b --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/insert.go @@ -0,0 +1,298 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "errors" + "fmt" + "io" + "sort" + "strings" + + "github.com/lann/builder" +) + +type insertData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes []Sqlizer + StatementKeyword string + Options []string + Into string + Columns []string + Values [][]interface{} + Suffixes []Sqlizer + Select *SelectBuilder +} + +func (d *insertData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *insertData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *insertData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *insertData) ToSql() (sqlStr string, args []interface{}, err error) { + if len(d.Into) == 0 { + err = errors.New("insert statements must specify a table") + return + } + if len(d.Values) == 0 && d.Select == nil { + err = errors.New("insert statements must have at least one set of values or select clause") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, err = appendToSql(d.Prefixes, sql, " ", args) + if err != nil { + return + } + + sql.WriteString(" ") + } + + if d.StatementKeyword == "" { + sql.WriteString("INSERT ") + } else { + sql.WriteString(d.StatementKeyword) + sql.WriteString(" ") + } + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + sql.WriteString("INTO ") + sql.WriteString(d.Into) + sql.WriteString(" ") + + if len(d.Columns) > 0 { + sql.WriteString("(") + sql.WriteString(strings.Join(d.Columns, ",")) + sql.WriteString(") ") + } + + if d.Select != nil { + args, err = d.appendSelectToSQL(sql, args) + } else { + args, err = d.appendValuesToSQL(sql, args) + } + if err != nil { + return + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Suffixes, sql, " ", args) + if err != nil { + return + } + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String()) + return +} + +func (d *insertData) appendValuesToSQL(w io.Writer, args []interface{}) ([]interface{}, error) { + if len(d.Values) == 0 { + return args, errors.New("values for insert statements are not 
set") + } + + io.WriteString(w, "VALUES ") + + valuesStrings := make([]string, len(d.Values)) + for r, row := range d.Values { + valueStrings := make([]string, len(row)) + for v, val := range row { + if vs, ok := val.(Sqlizer); ok { + vsql, vargs, err := vs.ToSql() + if err != nil { + return nil, err + } + valueStrings[v] = vsql + args = append(args, vargs...) + } else { + valueStrings[v] = "?" + args = append(args, val) + } + } + valuesStrings[r] = fmt.Sprintf("(%s)", strings.Join(valueStrings, ",")) + } + + io.WriteString(w, strings.Join(valuesStrings, ",")) + + return args, nil +} + +func (d *insertData) appendSelectToSQL(w io.Writer, args []interface{}) ([]interface{}, error) { + if d.Select == nil { + return args, errors.New("select clause for insert statements are not set") + } + + selectClause, sArgs, err := d.Select.ToSql() + if err != nil { + return args, err + } + + io.WriteString(w, selectClause) + args = append(args, sArgs...) + + return args, nil +} + +// Builder + +// InsertBuilder builds SQL INSERT statements. +type InsertBuilder builder.Builder + +func init() { + builder.Register(InsertBuilder{}, insertData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b InsertBuilder) PlaceholderFormat(f PlaceholderFormat) InsertBuilder { + return builder.Set(b, "PlaceholderFormat", f).(InsertBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +func (b InsertBuilder) RunWith(runner BaseRunner) InsertBuilder { + return setRunWith(b, runner).(InsertBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b InsertBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(insertData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b InsertBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(insertData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b InsertBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(insertData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b InsertBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b InsertBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(insertData) + return data.ToSql() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. +func (b InsertBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// Prefix adds an expression to the beginning of the query +func (b InsertBuilder) Prefix(sql string, args ...interface{}) InsertBuilder { + return b.PrefixExpr(Expr(sql, args...)) +} + +// PrefixExpr adds an expression to the very beginning of the query +func (b InsertBuilder) PrefixExpr(expr Sqlizer) InsertBuilder { + return builder.Append(b, "Prefixes", expr).(InsertBuilder) +} + +// Options adds keyword options before the INTO clause of the query. +func (b InsertBuilder) Options(options ...string) InsertBuilder { + return builder.Extend(b, "Options", options).(InsertBuilder) +} + +// Into sets the INTO clause of the query. 
+func (b InsertBuilder) Into(from string) InsertBuilder { + return builder.Set(b, "Into", from).(InsertBuilder) +} + +// Columns adds insert columns to the query. +func (b InsertBuilder) Columns(columns ...string) InsertBuilder { + return builder.Extend(b, "Columns", columns).(InsertBuilder) +} + +// Values adds a single row's values to the query. +func (b InsertBuilder) Values(values ...interface{}) InsertBuilder { + return builder.Append(b, "Values", values).(InsertBuilder) +} + +// Suffix adds an expression to the end of the query +func (b InsertBuilder) Suffix(sql string, args ...interface{}) InsertBuilder { + return b.SuffixExpr(Expr(sql, args...)) +} + +// SuffixExpr adds an expression to the end of the query +func (b InsertBuilder) SuffixExpr(expr Sqlizer) InsertBuilder { + return builder.Append(b, "Suffixes", expr).(InsertBuilder) +} + +// SetMap sets columns and values for the insert builder from a map of column names to values. +// Note that it will reset any columns and values that were previously set. +func (b InsertBuilder) SetMap(clauses map[string]interface{}) InsertBuilder { + // Keep the columns in a consistent order by sorting the column key string. + cols := make([]string, 0, len(clauses)) + for col := range clauses { + cols = append(cols, col) + } + sort.Strings(cols) + + vals := make([]interface{}, 0, len(clauses)) + for _, col := range cols { + vals = append(vals, clauses[col]) + } + + b = builder.Set(b, "Columns", cols).(InsertBuilder) + b = builder.Set(b, "Values", [][]interface{}{vals}).(InsertBuilder) + + return b +} + +// Select sets the SELECT clause for the insert query. +// If both Values and Select are used, Select takes priority. +func (b InsertBuilder) Select(sb SelectBuilder) InsertBuilder { + return builder.Set(b, "Select", &sb).(InsertBuilder) +} + +func (b InsertBuilder) statementKeyword(keyword string) InsertBuilder { + return builder.Set(b, "StatementKeyword", keyword).(InsertBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/insert_ctx.go b/vendor/github.com/Masterminds/squirrel/insert_ctx.go new file mode 100644 index 0000000000..4541c2fed3 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/insert_ctx.go @@ -0,0 +1,69 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" + + "github.com/lann/builder" +) + +func (d *insertData) ExecContext(ctx context.Context) (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(ExecerContext) + if !ok { + return nil, NoContextSupport + } + return ExecContextWith(ctx, ctxRunner, d) +} + +func (d *insertData) QueryContext(ctx context.Context) (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(QueryerContext) + if !ok { + return nil, NoContextSupport + } + return QueryContextWith(ctx, ctxRunner, d) +} + +func (d *insertData) QueryRowContext(ctx context.Context) RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRowerContext) + if !ok { + if _, ok := d.RunWith.(QueryerContext); !ok { + return &Row{err: RunnerNotQueryRunner} + } + return &Row{err: NoContextSupport} + } + return QueryRowContextWith(ctx, queryRower, d) +} + +// ExecContext builds and ExecContexts the query with the Runner set by RunWith. 
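Note why SetMap sorts its column keys: Go map iteration order is random, so sorting is what makes the generated SQL deterministic (important for tests and prepared-statement caches). A hedged sketch under the same module-path assumption:

```go
package main

import (
	"fmt"

	sq "github.com/Masterminds/squirrel" // assumed upstream module path
)

func main() {
	// Column names come out sorted, so "age" precedes "name" regardless of map order.
	sql, args, _ := sq.Insert("users").
		SetMap(map[string]interface{}{"name": "moe", "age": 13}).
		ToSql()
	fmt.Println(sql, args)
	// INSERT INTO users (age,name) VALUES (?,?) [13 moe]
}
```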
+func (b InsertBuilder) ExecContext(ctx context.Context) (sql.Result, error) { + data := builder.GetStruct(b).(insertData) + return data.ExecContext(ctx) +} + +// QueryContext builds and QueryContexts the query with the Runner set by RunWith. +func (b InsertBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(insertData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b InsertBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(insertData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b InsertBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) +} diff --git a/vendor/github.com/Masterminds/squirrel/part.go b/vendor/github.com/Masterminds/squirrel/part.go new file mode 100644 index 0000000000..f3a7b1545a --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/part.go @@ -0,0 +1,63 @@ +package squirrel + +import ( + "fmt" + "io" +) + +type part struct { + pred interface{} + args []interface{} +} + +func newPart(pred interface{}, args ...interface{}) Sqlizer { + return &part{pred, args} +} + +func (p part) ToSql() (sql string, args []interface{}, err error) { + switch pred := p.pred.(type) { + case nil: + // no-op + case Sqlizer: + sql, args, err = pred.ToSql() + case string: + sql = pred + args = p.args + default: + err = fmt.Errorf("expected string or Sqlizer, not %T", pred) + } + return +} + +func nestedToSql(s Sqlizer) (string, []interface{}, error) { + if raw, ok := s.(rawSqlizer); ok { + return raw.toSqlRaw() + } else { + return s.ToSql() + } +} + +func appendToSql(parts []Sqlizer, w io.Writer, sep string, args []interface{}) ([]interface{}, error) { + for i, p := range parts { + partSql, partArgs, err := nestedToSql(p) + if err != nil { + return nil, err + } else if len(partSql) == 0 { + continue + } + + if i > 0 { + _, err := io.WriteString(w, sep) + if err != nil { + return nil, err + } + } + + _, err = io.WriteString(w, partSql) + if err != nil { + return nil, err + } + args = append(args, partArgs...) + } + return args, nil +} diff --git a/vendor/github.com/Masterminds/squirrel/placeholder.go b/vendor/github.com/Masterminds/squirrel/placeholder.go new file mode 100644 index 0000000000..8e97a6c62d --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/placeholder.go @@ -0,0 +1,114 @@ +package squirrel + +import ( + "bytes" + "fmt" + "strings" +) + +// PlaceholderFormat is the interface that wraps the ReplacePlaceholders method. +// +// ReplacePlaceholders takes a SQL statement and replaces each question mark +// placeholder with a (possibly different) SQL placeholder. +type PlaceholderFormat interface { + ReplacePlaceholders(sql string) (string, error) +} + +type placeholderDebugger interface { + debugPlaceholder() string +} + +var ( + // Question is a PlaceholderFormat instance that leaves placeholders as + // question marks. + Question = questionFormat{} + + // Dollar is a PlaceholderFormat instance that replaces placeholders with + // dollar-prefixed positional placeholders (e.g. $1, $2, $3). + Dollar = dollarFormat{} + + // Colon is a PlaceholderFormat instance that replaces placeholders with + // colon-prefixed positional placeholders (e.g. :1, :2, :3). 
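+	//
+	// For example (editor's sketch), applying Colon to "x = ? AND y = ?"
+	// yields "x = :1 AND y = :2".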
+ Colon = colonFormat{} + + // AtP is a PlaceholderFormat instance that replaces placeholders with + // "@p"-prefixed positional placeholders (e.g. @p1, @p2, @p3). + AtP = atpFormat{} +) + +type questionFormat struct{} + +func (questionFormat) ReplacePlaceholders(sql string) (string, error) { + return sql, nil +} + +func (questionFormat) debugPlaceholder() string { + return "?" +} + +type dollarFormat struct{} + +func (dollarFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, "$") +} + +func (dollarFormat) debugPlaceholder() string { + return "$" +} + +type colonFormat struct{} + +func (colonFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, ":") +} + +func (colonFormat) debugPlaceholder() string { + return ":" +} + +type atpFormat struct{} + +func (atpFormat) ReplacePlaceholders(sql string) (string, error) { + return replacePositionalPlaceholders(sql, "@p") +} + +func (atpFormat) debugPlaceholder() string { + return "@p" +} + +// Placeholders returns a string with count ? placeholders joined with commas. +func Placeholders(count int) string { + if count < 1 { + return "" + } + + return strings.Repeat(",?", count)[1:] +} + +func replacePositionalPlaceholders(sql, prefix string) (string, error) { + buf := &bytes.Buffer{} + i := 0 + for { + p := strings.Index(sql, "?") + if p == -1 { + break + } + + if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ? + buf.WriteString(sql[:p]) + buf.WriteString("?") + if len(sql[p:]) == 1 { + break + } + sql = sql[p+2:] + } else { + i++ + buf.WriteString(sql[:p]) + fmt.Fprintf(buf, "%s%d", prefix, i) + sql = sql[p+1:] + } + } + + buf.WriteString(sql) + return buf.String(), nil +} diff --git a/vendor/github.com/Masterminds/squirrel/row.go b/vendor/github.com/Masterminds/squirrel/row.go new file mode 100644 index 0000000000..74ffda92bd --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/row.go @@ -0,0 +1,22 @@ +package squirrel + +// RowScanner is the interface that wraps the Scan method. +// +// Scan behaves like database/sql.Row.Scan. +type RowScanner interface { + Scan(...interface{}) error +} + +// Row wraps database/sql.Row to let squirrel return new errors on Scan. +type Row struct { + RowScanner + err error +} + +// Scan returns Row.err or calls RowScanner.Scan. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + return r.RowScanner.Scan(dest...) 
+} diff --git a/vendor/github.com/Masterminds/squirrel/select.go b/vendor/github.com/Masterminds/squirrel/select.go new file mode 100644 index 0000000000..b585344cec --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/select.go @@ -0,0 +1,396 @@ +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +type selectData struct { + PlaceholderFormat PlaceholderFormat + RunWith BaseRunner + Prefixes []Sqlizer + Options []string + Columns []Sqlizer + From Sqlizer + Joins []Sqlizer + WhereParts []Sqlizer + GroupBys []string + HavingParts []Sqlizer + OrderByParts []Sqlizer + Limit string + Offset string + Suffixes []Sqlizer +} + +func (d *selectData) Exec() (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return ExecWith(d.RunWith, d) +} + +func (d *selectData) Query() (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + return QueryWith(d.RunWith, d) +} + +func (d *selectData) QueryRow() RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRower) + if !ok { + return &Row{err: RunnerNotQueryRunner} + } + return QueryRowWith(queryRower, d) +} + +func (d *selectData) ToSql() (sqlStr string, args []interface{}, err error) { + sqlStr, args, err = d.toSqlRaw() + if err != nil { + return + } + + sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sqlStr) + return +} + +func (d *selectData) toSqlRaw() (sqlStr string, args []interface{}, err error) { + if len(d.Columns) == 0 { + err = fmt.Errorf("select statements must have at least one result column") + return + } + + sql := &bytes.Buffer{} + + if len(d.Prefixes) > 0 { + args, err = appendToSql(d.Prefixes, sql, " ", args) + if err != nil { + return + } + + sql.WriteString(" ") + } + + sql.WriteString("SELECT ") + + if len(d.Options) > 0 { + sql.WriteString(strings.Join(d.Options, " ")) + sql.WriteString(" ") + } + + if len(d.Columns) > 0 { + args, err = appendToSql(d.Columns, sql, ", ", args) + if err != nil { + return + } + } + + if d.From != nil { + sql.WriteString(" FROM ") + args, err = appendToSql([]Sqlizer{d.From}, sql, "", args) + if err != nil { + return + } + } + + if len(d.Joins) > 0 { + sql.WriteString(" ") + args, err = appendToSql(d.Joins, sql, " ", args) + if err != nil { + return + } + } + + if len(d.WhereParts) > 0 { + sql.WriteString(" WHERE ") + args, err = appendToSql(d.WhereParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.GroupBys) > 0 { + sql.WriteString(" GROUP BY ") + sql.WriteString(strings.Join(d.GroupBys, ", ")) + } + + if len(d.HavingParts) > 0 { + sql.WriteString(" HAVING ") + args, err = appendToSql(d.HavingParts, sql, " AND ", args) + if err != nil { + return + } + } + + if len(d.OrderByParts) > 0 { + sql.WriteString(" ORDER BY ") + args, err = appendToSql(d.OrderByParts, sql, ", ", args) + if err != nil { + return + } + } + + if len(d.Limit) > 0 { + sql.WriteString(" LIMIT ") + sql.WriteString(d.Limit) + } + + if len(d.Offset) > 0 { + sql.WriteString(" OFFSET ") + sql.WriteString(d.Offset) + } + + if len(d.Suffixes) > 0 { + sql.WriteString(" ") + + args, err = appendToSql(d.Suffixes, sql, " ", args) + if err != nil { + return + } + } + + sqlStr = sql.String() + return +} + +// Builder + +// SelectBuilder builds SQL SELECT statements. 
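+//
+// A sketch of typical usage (editor's example; Eq is defined in expr.go):
+//
+//	sql, args, _ := Select("id", "name").From("users").Where(Eq{"deleted_at": nil}).ToSql()
+//	// sql  == "SELECT id, name FROM users WHERE deleted_at IS NULL"
+//	// args == nil (no bound values)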
+type SelectBuilder builder.Builder + +func init() { + builder.Register(SelectBuilder{}, selectData{}) +} + +// Format methods + +// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the +// query. +func (b SelectBuilder) PlaceholderFormat(f PlaceholderFormat) SelectBuilder { + return builder.Set(b, "PlaceholderFormat", f).(SelectBuilder) +} + +// Runner methods + +// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec. +// For most cases runner will be a database connection. +// +// Internally we use this to mock out the database connection for testing. +func (b SelectBuilder) RunWith(runner BaseRunner) SelectBuilder { + return setRunWith(b, runner).(SelectBuilder) +} + +// Exec builds and Execs the query with the Runner set by RunWith. +func (b SelectBuilder) Exec() (sql.Result, error) { + data := builder.GetStruct(b).(selectData) + return data.Exec() +} + +// Query builds and Querys the query with the Runner set by RunWith. +func (b SelectBuilder) Query() (*sql.Rows, error) { + data := builder.GetStruct(b).(selectData) + return data.Query() +} + +// QueryRow builds and QueryRows the query with the Runner set by RunWith. +func (b SelectBuilder) QueryRow() RowScanner { + data := builder.GetStruct(b).(selectData) + return data.QueryRow() +} + +// Scan is a shortcut for QueryRow().Scan. +func (b SelectBuilder) Scan(dest ...interface{}) error { + return b.QueryRow().Scan(dest...) +} + +// SQL methods + +// ToSql builds the query into a SQL string and bound args. +func (b SelectBuilder) ToSql() (string, []interface{}, error) { + data := builder.GetStruct(b).(selectData) + return data.ToSql() +} + +func (b SelectBuilder) toSqlRaw() (string, []interface{}, error) { + data := builder.GetStruct(b).(selectData) + return data.toSqlRaw() +} + +// MustSql builds the query into a SQL string and bound args. +// It panics if there are any errors. +func (b SelectBuilder) MustSql() (string, []interface{}) { + sql, args, err := b.ToSql() + if err != nil { + panic(err) + } + return sql, args +} + +// Prefix adds an expression to the beginning of the query +func (b SelectBuilder) Prefix(sql string, args ...interface{}) SelectBuilder { + return b.PrefixExpr(Expr(sql, args...)) +} + +// PrefixExpr adds an expression to the very beginning of the query +func (b SelectBuilder) PrefixExpr(expr Sqlizer) SelectBuilder { + return builder.Append(b, "Prefixes", expr).(SelectBuilder) +} + +// Distinct adds a DISTINCT clause to the query. +func (b SelectBuilder) Distinct() SelectBuilder { + return b.Options("DISTINCT") +} + +// Options adds select option to the query +func (b SelectBuilder) Options(options ...string) SelectBuilder { + return builder.Extend(b, "Options", options).(SelectBuilder) +} + +// Columns adds result columns to the query. +func (b SelectBuilder) Columns(columns ...string) SelectBuilder { + parts := make([]interface{}, 0, len(columns)) + for _, str := range columns { + parts = append(parts, newPart(str)) + } + return builder.Extend(b, "Columns", parts).(SelectBuilder) +} + +// Column adds a result column to the query. +// Unlike Columns, Column accepts args which will be bound to placeholders in +// the columns string, for example: +// Column("IF(col IN ("+squirrel.Placeholders(3)+"), 1, 0) as col", 1, 2, 3) +func (b SelectBuilder) Column(column interface{}, args ...interface{}) SelectBuilder { + return builder.Append(b, "Columns", newPart(column, args...)).(SelectBuilder) +} + +// From sets the FROM clause of the query. 
+func (b SelectBuilder) From(from string) SelectBuilder {
+	return builder.Set(b, "From", newPart(from)).(SelectBuilder)
+}
+
+// FromSelect sets a subquery into the FROM clause of the query.
+func (b SelectBuilder) FromSelect(from SelectBuilder, alias string) SelectBuilder {
+	// Prevent misnumbered parameters in nested selects (#183).
+	from = from.PlaceholderFormat(Question)
+	return builder.Set(b, "From", Alias(from, alias)).(SelectBuilder)
+}
+
+// JoinClause adds a join clause to the query.
+func (b SelectBuilder) JoinClause(pred interface{}, args ...interface{}) SelectBuilder {
+	return builder.Append(b, "Joins", newPart(pred, args...)).(SelectBuilder)
+}
+
+// Join adds a JOIN clause to the query.
+func (b SelectBuilder) Join(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("JOIN "+join, rest...)
+}
+
+// LeftJoin adds a LEFT JOIN clause to the query.
+func (b SelectBuilder) LeftJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("LEFT JOIN "+join, rest...)
+}
+
+// RightJoin adds a RIGHT JOIN clause to the query.
+func (b SelectBuilder) RightJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("RIGHT JOIN "+join, rest...)
+}
+
+// InnerJoin adds an INNER JOIN clause to the query.
+func (b SelectBuilder) InnerJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("INNER JOIN "+join, rest...)
+}
+
+// CrossJoin adds a CROSS JOIN clause to the query.
+func (b SelectBuilder) CrossJoin(join string, rest ...interface{}) SelectBuilder {
+	return b.JoinClause("CROSS JOIN "+join, rest...)
+}
+
+// Where adds an expression to the WHERE clause of the query.
+//
+// Expressions are ANDed together in the generated SQL.
+//
+// Where accepts several types for its pred argument:
+//
+// nil OR "" - ignored.
+//
+// string - SQL expression.
+// If the expression has SQL placeholders then a set of arguments must be passed
+// as well, one for each placeholder.
+//
+// map[string]interface{} OR Eq - map of SQL expressions to values. Each key is
+// transformed into an expression like "<key> = ?", with the corresponding value
+// bound to the placeholder. If the value is nil, the expression will be
+// "<key> IS NULL". If the value is an array or slice, the expression will be
+// "<key> IN (?,?,...)", with one placeholder for each item in the value. These
+// expressions are ANDed together.
+//
+// Where will panic if pred isn't any of the above types.
+func (b SelectBuilder) Where(pred interface{}, args ...interface{}) SelectBuilder {
+	if pred == nil || pred == "" {
+		return b
+	}
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(SelectBuilder)
+}
+
+// GroupBy adds GROUP BY expressions to the query.
+func (b SelectBuilder) GroupBy(groupBys ...string) SelectBuilder {
+	return builder.Extend(b, "GroupBys", groupBys).(SelectBuilder)
+}
+
+// Having adds an expression to the HAVING clause of the query.
+//
+// See Where.
+func (b SelectBuilder) Having(pred interface{}, rest ...interface{}) SelectBuilder {
+	return builder.Append(b, "HavingParts", newWherePart(pred, rest...)).(SelectBuilder)
+}
+
+// OrderByClause adds ORDER BY clause to the query.
+func (b SelectBuilder) OrderByClause(pred interface{}, args ...interface{}) SelectBuilder {
+	return builder.Append(b, "OrderByParts", newPart(pred, args...)).(SelectBuilder)
+}
+
+// OrderBy adds ORDER BY expressions to the query.
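+//
+// For example (editor's sketch), OrderBy("created_at DESC", "id") renders
+// as " ORDER BY created_at DESC, id".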
+func (b SelectBuilder) OrderBy(orderBys ...string) SelectBuilder {
+	for _, orderBy := range orderBys {
+		b = b.OrderByClause(orderBy)
+	}
+
+	return b
+}
+
+// Limit sets a LIMIT clause on the query.
+func (b SelectBuilder) Limit(limit uint64) SelectBuilder {
+	return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(SelectBuilder)
+}
+
+// RemoveLimit removes the LIMIT clause, allowing the query to access all
+// matching records.
+func (b SelectBuilder) RemoveLimit() SelectBuilder {
+	return builder.Delete(b, "Limit").(SelectBuilder)
+}
+
+// Offset sets an OFFSET clause on the query.
+func (b SelectBuilder) Offset(offset uint64) SelectBuilder {
+	return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(SelectBuilder)
+}
+
+// RemoveOffset removes the OFFSET clause.
+func (b SelectBuilder) RemoveOffset() SelectBuilder {
+	return builder.Delete(b, "Offset").(SelectBuilder)
+}
+
+// Suffix adds an expression to the end of the query
+func (b SelectBuilder) Suffix(sql string, args ...interface{}) SelectBuilder {
+	return b.SuffixExpr(Expr(sql, args...))
+}
+
+// SuffixExpr adds an expression to the end of the query
+func (b SelectBuilder) SuffixExpr(expr Sqlizer) SelectBuilder {
+	return builder.Append(b, "Suffixes", expr).(SelectBuilder)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/select_ctx.go b/vendor/github.com/Masterminds/squirrel/select_ctx.go
new file mode 100644
index 0000000000..4c42c13f47
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/select_ctx.go
@@ -0,0 +1,69 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+
+	"github.com/lann/builder"
+)
+
+func (d *selectData) ExecContext(ctx context.Context) (sql.Result, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(ExecerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return ExecContextWith(ctx, ctxRunner, d)
+}
+
+func (d *selectData) QueryContext(ctx context.Context) (*sql.Rows, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	ctxRunner, ok := d.RunWith.(QueryerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	return QueryContextWith(ctx, ctxRunner, d)
+}
+
+func (d *selectData) QueryRowContext(ctx context.Context) RowScanner {
+	if d.RunWith == nil {
+		return &Row{err: RunnerNotSet}
+	}
+	queryRower, ok := d.RunWith.(QueryRowerContext)
+	if !ok {
+		if _, ok := d.RunWith.(QueryerContext); !ok {
+			return &Row{err: RunnerNotQueryRunner}
+		}
+		return &Row{err: NoContextSupport}
+	}
+	return QueryRowContextWith(ctx, queryRower, d)
+}
+
+// ExecContext builds and ExecContexts the query with the Runner set by RunWith.
+func (b SelectBuilder) ExecContext(ctx context.Context) (sql.Result, error) {
+	data := builder.GetStruct(b).(selectData)
+	return data.ExecContext(ctx)
+}
+
+// QueryContext builds and QueryContexts the query with the Runner set by RunWith.
+func (b SelectBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) {
+	data := builder.GetStruct(b).(selectData)
+	return data.QueryContext(ctx)
+}
+
+// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith.
+func (b SelectBuilder) QueryRowContext(ctx context.Context) RowScanner {
+	data := builder.GetStruct(b).(selectData)
+	return data.QueryRowContext(ctx)
+}
+
+// ScanContext is a shortcut for QueryRowContext().Scan.
+func (b SelectBuilder) ScanContext(ctx context.Context, dest ...interface{}) error {
+	return b.QueryRowContext(ctx).Scan(dest...)
+} diff --git a/vendor/github.com/Masterminds/squirrel/squirrel.go b/vendor/github.com/Masterminds/squirrel/squirrel.go new file mode 100644 index 0000000000..46d456eb62 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/squirrel.go @@ -0,0 +1,183 @@ +// Package squirrel provides a fluent SQL generator. +// +// See https://github.com/Masterminds/squirrel for examples. +package squirrel + +import ( + "bytes" + "database/sql" + "fmt" + "strings" + + "github.com/lann/builder" +) + +// Sqlizer is the interface that wraps the ToSql method. +// +// ToSql returns a SQL representation of the Sqlizer, along with a slice of args +// as passed to e.g. database/sql.Exec. It can also return an error. +type Sqlizer interface { + ToSql() (string, []interface{}, error) +} + +// rawSqlizer is expected to do what Sqlizer does, but without finalizing placeholders. +// This is useful for nested queries. +type rawSqlizer interface { + toSqlRaw() (string, []interface{}, error) +} + +// Execer is the interface that wraps the Exec method. +// +// Exec executes the given query as implemented by database/sql.Exec. +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Queryer is the interface that wraps the Query method. +// +// Query executes the given query as implemented by database/sql.Query. +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) +} + +// QueryRower is the interface that wraps the QueryRow method. +// +// QueryRow executes the given query as implemented by database/sql.QueryRow. +type QueryRower interface { + QueryRow(query string, args ...interface{}) RowScanner +} + +// BaseRunner groups the Execer and Queryer interfaces. +type BaseRunner interface { + Execer + Queryer +} + +// Runner groups the Execer, Queryer, and QueryRower interfaces. +type Runner interface { + Execer + Queryer + QueryRower +} + +// WrapStdSql wraps a type implementing the standard SQL interface with methods that +// squirrel expects. +func WrapStdSql(stdSql StdSql) Runner { + return &stdsqlRunner{stdSql} +} + +// StdSql encompasses the standard methods of the *sql.DB type, and other types that +// wrap these methods. +type StdSql interface { + Query(string, ...interface{}) (*sql.Rows, error) + QueryRow(string, ...interface{}) *sql.Row + Exec(string, ...interface{}) (sql.Result, error) +} + +type stdsqlRunner struct { + StdSql +} + +func (r *stdsqlRunner) QueryRow(query string, args ...interface{}) RowScanner { + return r.StdSql.QueryRow(query, args...) +} + +func setRunWith(b interface{}, runner BaseRunner) interface{} { + switch r := runner.(type) { + case StdSqlCtx: + runner = WrapStdSqlCtx(r) + case StdSql: + runner = WrapStdSql(r) + } + return builder.Set(b, "RunWith", runner) +} + +// RunnerNotSet is returned by methods that need a Runner if it isn't set. +var RunnerNotSet = fmt.Errorf("cannot run; no Runner set (RunWith)") + +// RunnerNotQueryRunner is returned by QueryRow if the RunWith value doesn't implement QueryRower. +var RunnerNotQueryRunner = fmt.Errorf("cannot QueryRow; Runner is not a QueryRower") + +// ExecWith Execs the SQL returned by s with db. +func ExecWith(db Execer, s Sqlizer) (res sql.Result, err error) { + query, args, err := s.ToSql() + if err != nil { + return + } + return db.Exec(query, args...) +} + +// QueryWith Querys the SQL returned by s with db. 
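+//
+// Sketch (editor's example; db may be any Queryer, e.g. a *sql.DB):
+//
+//	rows, err := QueryWith(db, Select("id").From("users"))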
+func QueryWith(db Queryer, s Sqlizer) (rows *sql.Rows, err error) {
+	query, args, err := s.ToSql()
+	if err != nil {
+		return
+	}
+	return db.Query(query, args...)
+}
+
+// QueryRowWith QueryRows the SQL returned by s with db.
+func QueryRowWith(db QueryRower, s Sqlizer) RowScanner {
+	query, args, err := s.ToSql()
+	return &Row{RowScanner: db.QueryRow(query, args...), err: err}
+}
+
+// DebugSqlizer calls ToSql on s and shows the approximate SQL to be executed
+//
+// If ToSql returns an error, the result of this method will look like:
+// "[ToSql error: %s]" or "[DebugSqlizer error: %s]"
+//
+// IMPORTANT: As its name suggests, this function should only be used for
+// debugging. While the string result *might* be valid SQL, this function does
+// not try very hard to ensure it. Additionally, executing the output of this
+// function with any untrusted user input is certainly insecure.
+func DebugSqlizer(s Sqlizer) string {
+	sql, args, err := s.ToSql()
+	if err != nil {
+		return fmt.Sprintf("[ToSql error: %s]", err)
+	}
+
+	var placeholder string
+	downCast, ok := s.(placeholderDebugger)
+	if !ok {
+		placeholder = "?"
+	} else {
+		placeholder = downCast.debugPlaceholder()
+	}
+	// TODO: dedupe this with placeholder.go
+	buf := &bytes.Buffer{}
+	i := 0
+	for {
+		p := strings.Index(sql, placeholder)
+		if p == -1 {
+			break
+		}
+		if len(sql[p:]) > 1 && sql[p:p+2] == "??" { // escape ?? => ?
+			buf.WriteString(sql[:p])
+			buf.WriteString("?")
+			if len(sql[p:]) == 1 {
+				break
+			}
+			sql = sql[p+2:]
+		} else {
+			if i+1 > len(args) {
+				return fmt.Sprintf(
+					"[DebugSqlizer error: too many placeholders in %#v for %d args]",
+					sql, len(args))
+			}
+			buf.WriteString(sql[:p])
+			fmt.Fprintf(buf, "'%v'", args[i])
+			// advance our sql string "cursor" beyond the arg we placed
+			sql = sql[p+1:]
+			i++
+		}
+	}
+	if i < len(args) {
+		return fmt.Sprintf(
+			"[DebugSqlizer error: not enough placeholders in %#v for %d args]",
+			sql, len(args))
+	}
+	// "append" any remaining sql that won't need interpolating
+	buf.WriteString(sql)
+	return buf.String()
+}
diff --git a/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go b/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go
new file mode 100644
index 0000000000..504e763d2e
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/squirrel_ctx.go
@@ -0,0 +1,93 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+	"errors"
+)
+
+// NoContextSupport is returned if a db doesn't support Context.
+var NoContextSupport = errors.New("DB does not support Context")
+
+// ExecerContext is the interface that wraps the ExecContext method.
+//
+// ExecContext executes the given query as implemented by database/sql.ExecContext.
+type ExecerContext interface {
+	ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error)
+}
+
+// QueryerContext is the interface that wraps the QueryContext method.
+//
+// QueryContext executes the given query as implemented by database/sql.QueryContext.
+type QueryerContext interface {
+	QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error)
+}
+
+// QueryRowerContext is the interface that wraps the QueryRowContext method.
+//
+// QueryRowContext executes the given query as implemented by database/sql.QueryRowContext.
+type QueryRowerContext interface {
+	QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner
+}
+
+// RunnerContext groups the Runner interface, along with the Context versions
+// of each of its methods.
+type RunnerContext interface {
+	Runner
+	QueryerContext
+	QueryRowerContext
+	ExecerContext
+}
+
+// WrapStdSqlCtx wraps a type implementing the standard SQL interface plus the context
+// versions of the methods with methods that squirrel expects.
+func WrapStdSqlCtx(stdSqlCtx StdSqlCtx) RunnerContext {
+	return &stdsqlCtxRunner{stdSqlCtx}
+}
+
+// StdSqlCtx encompasses the standard methods of the *sql.DB type, along with the Context
+// versions of those methods, and other types that wrap these methods.
+type StdSqlCtx interface {
+	StdSql
+	QueryContext(context.Context, string, ...interface{}) (*sql.Rows, error)
+	QueryRowContext(context.Context, string, ...interface{}) *sql.Row
+	ExecContext(context.Context, string, ...interface{}) (sql.Result, error)
+}
+
+type stdsqlCtxRunner struct {
+	StdSqlCtx
+}
+
+func (r *stdsqlCtxRunner) QueryRow(query string, args ...interface{}) RowScanner {
+	return r.StdSqlCtx.QueryRow(query, args...)
+}
+
+func (r *stdsqlCtxRunner) QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner {
+	return r.StdSqlCtx.QueryRowContext(ctx, query, args...)
+}
+
+// ExecContextWith ExecContexts the SQL returned by s with db.
+func ExecContextWith(ctx context.Context, db ExecerContext, s Sqlizer) (res sql.Result, err error) {
+	query, args, err := s.ToSql()
+	if err != nil {
+		return
+	}
+	return db.ExecContext(ctx, query, args...)
+}
+
+// QueryContextWith QueryContexts the SQL returned by s with db.
+func QueryContextWith(ctx context.Context, db QueryerContext, s Sqlizer) (rows *sql.Rows, err error) {
+	query, args, err := s.ToSql()
+	if err != nil {
+		return
+	}
+	return db.QueryContext(ctx, query, args...)
+}
+
+// QueryRowContextWith QueryRowContexts the SQL returned by s with db.
+func QueryRowContextWith(ctx context.Context, db QueryRowerContext, s Sqlizer) RowScanner {
+	query, args, err := s.ToSql()
+	return &Row{RowScanner: db.QueryRowContext(ctx, query, args...), err: err}
+}
diff --git a/vendor/github.com/Masterminds/squirrel/statement.go b/vendor/github.com/Masterminds/squirrel/statement.go
new file mode 100644
index 0000000000..9420c67f8e
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/statement.go
@@ -0,0 +1,104 @@
+package squirrel
+
+import "github.com/lann/builder"
+
+// StatementBuilderType is the type of StatementBuilder.
+type StatementBuilderType builder.Builder
+
+// Select returns a SelectBuilder for this StatementBuilderType.
+func (b StatementBuilderType) Select(columns ...string) SelectBuilder {
+	return SelectBuilder(b).Columns(columns...)
+}
+
+// Insert returns an InsertBuilder for this StatementBuilderType.
+func (b StatementBuilderType) Insert(into string) InsertBuilder {
+	return InsertBuilder(b).Into(into)
+}
+
+// Replace returns an InsertBuilder for this StatementBuilderType with the
+// statement keyword set to "REPLACE".
+func (b StatementBuilderType) Replace(into string) InsertBuilder {
+	return InsertBuilder(b).statementKeyword("REPLACE").Into(into)
+}
+
+// Update returns an UpdateBuilder for this StatementBuilderType.
+func (b StatementBuilderType) Update(table string) UpdateBuilder {
+	return UpdateBuilder(b).Table(table)
+}
+
+// Delete returns a DeleteBuilder for this StatementBuilderType.
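+//
+// As with the other methods above, a sketch (editor's example; psql is an
+// arbitrary name):
+//
+//	psql := StatementBuilder.PlaceholderFormat(Dollar)
+//	sql, _, _ := psql.Select("id").From("users").Where("name = ?", "moe").ToSql()
+//	// sql == "SELECT id FROM users WHERE name = $1"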
+func (b StatementBuilderType) Delete(from string) DeleteBuilder {
+	return DeleteBuilder(b).From(from)
+}
+
+// PlaceholderFormat sets the PlaceholderFormat field for any child builders.
+func (b StatementBuilderType) PlaceholderFormat(f PlaceholderFormat) StatementBuilderType {
+	return builder.Set(b, "PlaceholderFormat", f).(StatementBuilderType)
+}
+
+// RunWith sets the RunWith field for any child builders.
+func (b StatementBuilderType) RunWith(runner BaseRunner) StatementBuilderType {
+	return setRunWith(b, runner).(StatementBuilderType)
+}
+
+// Where adds WHERE expressions to the query.
+//
+// See SelectBuilder.Where for more information.
+func (b StatementBuilderType) Where(pred interface{}, args ...interface{}) StatementBuilderType {
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(StatementBuilderType)
+}
+
+// StatementBuilder is a parent builder for other builders, e.g. SelectBuilder.
+var StatementBuilder = StatementBuilderType(builder.EmptyBuilder).PlaceholderFormat(Question)
+
+// Select returns a new SelectBuilder, optionally setting some result columns.
+//
+// See SelectBuilder.Columns.
+func Select(columns ...string) SelectBuilder {
+	return StatementBuilder.Select(columns...)
+}
+
+// Insert returns a new InsertBuilder with the given table name.
+//
+// See InsertBuilder.Into.
+func Insert(into string) InsertBuilder {
+	return StatementBuilder.Insert(into)
+}
+
+// Replace returns a new InsertBuilder with the statement keyword set to
+// "REPLACE" and with the given table name.
+//
+// See InsertBuilder.Into.
+func Replace(into string) InsertBuilder {
+	return StatementBuilder.Replace(into)
+}
+
+// Update returns a new UpdateBuilder with the given table name.
+//
+// See UpdateBuilder.Table.
+func Update(table string) UpdateBuilder {
+	return StatementBuilder.Update(table)
+}
+
+// Delete returns a new DeleteBuilder with the given table name.
+//
+// See DeleteBuilder.Table.
+func Delete(from string) DeleteBuilder {
+	return StatementBuilder.Delete(from)
+}
+
+// Case returns a new CaseBuilder.
+// "what" represents the case value.
+func Case(what ...interface{}) CaseBuilder {
+	b := CaseBuilder(builder.EmptyBuilder)
+
+	switch len(what) {
+	case 0:
+	case 1:
+		b = b.what(what[0])
+	default:
+		b = b.what(newPart(what[0], what[1:]...))
+	}
+	return b
+}
diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher.go b/vendor/github.com/Masterminds/squirrel/stmtcacher.go
new file mode 100644
index 0000000000..5bf267a136
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/stmtcacher.go
@@ -0,0 +1,121 @@
+package squirrel
+
+import (
+	"database/sql"
+	"fmt"
+	"sync"
+)
+
+// Preparer is the interface that wraps the Prepare method.
+//
+// Prepare executes the given query as implemented by database/sql.Prepare.
+type Preparer interface {
+	Prepare(query string) (*sql.Stmt, error)
+}
+
+// DBProxy groups the Execer, Queryer, QueryRower, and Preparer interfaces.
+type DBProxy interface {
+	Execer
+	Queryer
+	QueryRower
+	Preparer
+}
+
+// NOTE: NewStmtCache is defined in stmtcacher_ctx.go (Go >= 1.8) or stmtcacher_noctx.go (Go < 1.8).
+
+// StmtCache wraps and delegates down to a Preparer type.
+//
+// It automatically prepares all statements sent to the underlying Preparer for
+// Exec, Query and QueryRow calls, and caches the returned *sql.Stmt using the
+// provided query as the key so that it can be re-used.
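+//
+// Sketch (editor's example; db is a *sql.DB):
+//
+//	sc := NewStmtCache(db)
+//	rows, err := Select("id").From("users").RunWith(sc).Query()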
+type StmtCache struct {
+	prep  Preparer
+	cache map[string]*sql.Stmt
+	mu    sync.Mutex
+}
+
+// Prepare delegates down to the underlying Preparer and caches the result
+// using the provided query as a key.
+func (sc *StmtCache) Prepare(query string) (*sql.Stmt, error) {
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+
+	stmt, ok := sc.cache[query]
+	if ok {
+		return stmt, nil
+	}
+	stmt, err := sc.prep.Prepare(query)
+	if err == nil {
+		sc.cache[query] = stmt
+	}
+	return stmt, err
+}
+
+// Exec delegates down to the underlying Preparer using a prepared statement
+func (sc *StmtCache) Exec(query string, args ...interface{}) (res sql.Result, err error) {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return
+	}
+	return stmt.Exec(args...)
+}
+
+// Query delegates down to the underlying Preparer using a prepared statement
+func (sc *StmtCache) Query(query string, args ...interface{}) (rows *sql.Rows, err error) {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return
+	}
+	return stmt.Query(args...)
+}
+
+// QueryRow delegates down to the underlying Preparer using a prepared statement
+func (sc *StmtCache) QueryRow(query string, args ...interface{}) RowScanner {
+	stmt, err := sc.Prepare(query)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return stmt.QueryRow(args...)
+}
+
+// Clear removes and closes all the currently cached prepared statements
+func (sc *StmtCache) Clear() (err error) {
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+
+	for key, stmt := range sc.cache {
+		delete(sc.cache, key)
+
+		if stmt == nil {
+			continue
+		}
+
+		if cerr := stmt.Close(); cerr != nil {
+			err = cerr
+		}
+	}
+
+	if err != nil {
+		return fmt.Errorf("one or more Stmt.Close failed; last error: %v", err)
+	}
+
+	return
+}
+
+// DBProxyBeginner groups the DBProxy interface with the Begin method for
+// starting transactions.
+type DBProxyBeginner interface {
+	DBProxy
+	Begin() (*sql.Tx, error)
+}
+
+type stmtCacheProxy struct {
+	DBProxy
+	db *sql.DB
+}
+
+// NewStmtCacheProxy creates a DBProxyBeginner backed by a statement cache on db.
+func NewStmtCacheProxy(db *sql.DB) DBProxyBeginner {
+	return &stmtCacheProxy{DBProxy: NewStmtCache(db), db: db}
+}
+
+func (sp *stmtCacheProxy) Begin() (*sql.Tx, error) {
+	return sp.db.Begin()
+}
diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go b/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go
new file mode 100644
index 0000000000..53603cf4c9
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/stmtcacher_ctx.go
@@ -0,0 +1,86 @@
+// +build go1.8
+
+package squirrel
+
+import (
+	"context"
+	"database/sql"
+)
+
+// PreparerContext is the interface that wraps the Prepare and PrepareContext methods.
+//
+// Prepare executes the given query as implemented by database/sql.Prepare.
+// PrepareContext executes the given query as implemented by database/sql.PrepareContext.
+type PreparerContext interface {
+	Preparer
+	PrepareContext(ctx context.Context, query string) (*sql.Stmt, error)
+}
+
+// DBProxyContext groups the Execer, Queryer, QueryRower and PreparerContext interfaces.
+type DBProxyContext interface {
+	Execer
+	Queryer
+	QueryRower
+	PreparerContext
+}
+
+// NewStmtCache returns a *StmtCache wrapping a PreparerContext that caches Prepared Stmts.
+//
+// Stmts are cached based on the string value of their queries.
+func NewStmtCache(prep PreparerContext) *StmtCache {
+	return &StmtCache{prep: prep, cache: make(map[string]*sql.Stmt)}
+}
+
+// NewStmtCacher is deprecated.
+//
+// Use NewStmtCache instead.
+func NewStmtCacher(prep PreparerContext) DBProxyContext {
+	return NewStmtCache(prep)
+}
+
+// PrepareContext delegates down to the underlying PreparerContext and caches the result
+// using the provided query as a key.
+func (sc *StmtCache) PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) {
+	ctxPrep, ok := sc.prep.(PreparerContext)
+	if !ok {
+		return nil, NoContextSupport
+	}
+	sc.mu.Lock()
+	defer sc.mu.Unlock()
+	stmt, ok := sc.cache[query]
+	if ok {
+		return stmt, nil
+	}
+	stmt, err := ctxPrep.PrepareContext(ctx, query)
+	if err == nil {
+		sc.cache[query] = stmt
+	}
+	return stmt, err
+}
+
+// ExecContext delegates down to the underlying PreparerContext using a prepared statement
+func (sc *StmtCache) ExecContext(ctx context.Context, query string, args ...interface{}) (res sql.Result, err error) {
+	stmt, err := sc.PrepareContext(ctx, query)
+	if err != nil {
+		return
+	}
+	return stmt.ExecContext(ctx, args...)
+}
+
+// QueryContext delegates down to the underlying PreparerContext using a prepared statement
+func (sc *StmtCache) QueryContext(ctx context.Context, query string, args ...interface{}) (rows *sql.Rows, err error) {
+	stmt, err := sc.PrepareContext(ctx, query)
+	if err != nil {
+		return
+	}
+	return stmt.QueryContext(ctx, args...)
+}
+
+// QueryRowContext delegates down to the underlying PreparerContext using a prepared statement
+func (sc *StmtCache) QueryRowContext(ctx context.Context, query string, args ...interface{}) RowScanner {
+	stmt, err := sc.PrepareContext(ctx, query)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return stmt.QueryRowContext(ctx, args...)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go b/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go
new file mode 100644
index 0000000000..deac96777b
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/stmtcacher_noctx.go
@@ -0,0 +1,21 @@
+// +build !go1.8
+
+package squirrel
+
+import (
+	"database/sql"
+)
+
+// NewStmtCache returns a *StmtCache wrapping prep that caches Prepared Stmts.
+//
+// Stmts are cached based on the string value of their queries.
+func NewStmtCache(prep Preparer) *StmtCache {
+	return &StmtCache{prep: prep, cache: make(map[string]*sql.Stmt)}
+}
+
+// NewStmtCacher is deprecated.
+//
+// Use NewStmtCache instead.
+func NewStmtCacher(prep Preparer) DBProxy {
+	return NewStmtCache(prep)
+}
diff --git a/vendor/github.com/Masterminds/squirrel/update.go b/vendor/github.com/Masterminds/squirrel/update.go
new file mode 100644
index 0000000000..8d658d7219
--- /dev/null
+++ b/vendor/github.com/Masterminds/squirrel/update.go
@@ -0,0 +1,266 @@
+package squirrel
+
+import (
+	"bytes"
+	"database/sql"
+	"fmt"
+	"sort"
+	"strings"
+
+	"github.com/lann/builder"
+)
+
+type updateData struct {
+	PlaceholderFormat PlaceholderFormat
+	RunWith           BaseRunner
+	Prefixes          []Sqlizer
+	Table             string
+	SetClauses        []setClause
+	WhereParts        []Sqlizer
+	OrderBys          []string
+	Limit             string
+	Offset            string
+	Suffixes          []Sqlizer
+}
+
+type setClause struct {
+	column string
+	value  interface{}
+}
+
+func (d *updateData) Exec() (sql.Result, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	return ExecWith(d.RunWith, d)
+}
+
+func (d *updateData) Query() (*sql.Rows, error) {
+	if d.RunWith == nil {
+		return nil, RunnerNotSet
+	}
+	return QueryWith(d.RunWith, d)
+}
+
+func (d *updateData) QueryRow() RowScanner {
+	if d.RunWith == nil {
+		return &Row{err: RunnerNotSet}
+	}
+	queryRower, ok := d.RunWith.(QueryRower)
+	if !ok {
+		return &Row{err: RunnerNotQueryRunner}
+	}
+	return QueryRowWith(queryRower, d)
+}
+
+func (d *updateData) ToSql() (sqlStr string, args []interface{}, err error) {
+	if len(d.Table) == 0 {
+		err = fmt.Errorf("update statements must specify a table")
+		return
+	}
+	if len(d.SetClauses) == 0 {
+		err = fmt.Errorf("update statements must have at least one Set clause")
+		return
+	}
+
+	sql := &bytes.Buffer{}
+
+	if len(d.Prefixes) > 0 {
+		args, err = appendToSql(d.Prefixes, sql, " ", args)
+		if err != nil {
+			return
+		}
+
+		sql.WriteString(" ")
+	}
+
+	sql.WriteString("UPDATE ")
+	sql.WriteString(d.Table)
+
+	sql.WriteString(" SET ")
+	setSqls := make([]string, len(d.SetClauses))
+	for i, setClause := range d.SetClauses {
+		var valSql string
+		if vs, ok := setClause.value.(Sqlizer); ok {
+			vsql, vargs, err := vs.ToSql()
+			if err != nil {
+				return "", nil, err
+			}
+			if _, ok := vs.(SelectBuilder); ok {
+				valSql = fmt.Sprintf("(%s)", vsql)
+			} else {
+				valSql = vsql
+			}
+			args = append(args, vargs...)
+		} else {
+			valSql = "?"
+			args = append(args, setClause.value)
+		}
+		setSqls[i] = fmt.Sprintf("%s = %s", setClause.column, valSql)
+	}
+	sql.WriteString(strings.Join(setSqls, ", "))
+
+	if len(d.WhereParts) > 0 {
+		sql.WriteString(" WHERE ")
+		args, err = appendToSql(d.WhereParts, sql, " AND ", args)
+		if err != nil {
+			return
+		}
+	}
+
+	if len(d.OrderBys) > 0 {
+		sql.WriteString(" ORDER BY ")
+		sql.WriteString(strings.Join(d.OrderBys, ", "))
+	}
+
+	if len(d.Limit) > 0 {
+		sql.WriteString(" LIMIT ")
+		sql.WriteString(d.Limit)
+	}
+
+	if len(d.Offset) > 0 {
+		sql.WriteString(" OFFSET ")
+		sql.WriteString(d.Offset)
+	}
+
+	if len(d.Suffixes) > 0 {
+		sql.WriteString(" ")
+		args, err = appendToSql(d.Suffixes, sql, " ", args)
+		if err != nil {
+			return
+		}
+	}
+
+	sqlStr, err = d.PlaceholderFormat.ReplacePlaceholders(sql.String())
+	return
+}
+
+// Builder
+
+// UpdateBuilder builds SQL UPDATE statements.
+type UpdateBuilder builder.Builder
+
+func init() {
+	builder.Register(UpdateBuilder{}, updateData{})
+}
+
+// Format methods
+
+// PlaceholderFormat sets PlaceholderFormat (e.g. Question or Dollar) for the
+// query.
+func (b UpdateBuilder) PlaceholderFormat(f PlaceholderFormat) UpdateBuilder {
+	return builder.Set(b, "PlaceholderFormat", f).(UpdateBuilder)
+}
+
+// Runner methods
+
+// RunWith sets a Runner (like database/sql.DB) to be used with e.g. Exec.
+func (b UpdateBuilder) RunWith(runner BaseRunner) UpdateBuilder {
+	return setRunWith(b, runner).(UpdateBuilder)
+}
+
+// Exec builds and Execs the query with the Runner set by RunWith.
+func (b UpdateBuilder) Exec() (sql.Result, error) {
+	data := builder.GetStruct(b).(updateData)
+	return data.Exec()
+}
+
+// Query builds and Querys the query with the Runner set by RunWith.
+func (b UpdateBuilder) Query() (*sql.Rows, error) {
+	data := builder.GetStruct(b).(updateData)
+	return data.Query()
+}
+
+// QueryRow builds and QueryRows the query with the Runner set by RunWith.
+func (b UpdateBuilder) QueryRow() RowScanner {
+	data := builder.GetStruct(b).(updateData)
+	return data.QueryRow()
+}
+
+// Scan is a shortcut for QueryRow().Scan.
+func (b UpdateBuilder) Scan(dest ...interface{}) error {
+	return b.QueryRow().Scan(dest...)
+}
+
+// SQL methods
+
+// ToSql builds the query into a SQL string and bound args.
+func (b UpdateBuilder) ToSql() (string, []interface{}, error) {
+	data := builder.GetStruct(b).(updateData)
+	return data.ToSql()
+}
+
+// MustSql builds the query into a SQL string and bound args.
+// It panics if there are any errors.
+func (b UpdateBuilder) MustSql() (string, []interface{}) {
+	sql, args, err := b.ToSql()
+	if err != nil {
+		panic(err)
+	}
+	return sql, args
+}
+
+// Prefix adds an expression to the beginning of the query
+func (b UpdateBuilder) Prefix(sql string, args ...interface{}) UpdateBuilder {
+	return b.PrefixExpr(Expr(sql, args...))
+}
+
+// PrefixExpr adds an expression to the very beginning of the query
+func (b UpdateBuilder) PrefixExpr(expr Sqlizer) UpdateBuilder {
+	return builder.Append(b, "Prefixes", expr).(UpdateBuilder)
+}
+
+// Table sets the table to be updated.
+func (b UpdateBuilder) Table(table string) UpdateBuilder {
+	return builder.Set(b, "Table", table).(UpdateBuilder)
+}
+
+// Set adds SET clauses to the query.
+func (b UpdateBuilder) Set(column string, value interface{}) UpdateBuilder {
+	return builder.Append(b, "SetClauses", setClause{column: column, value: value}).(UpdateBuilder)
+}
+
+// SetMap is a convenience method which calls .Set for each key/value pair in clauses.
+func (b UpdateBuilder) SetMap(clauses map[string]interface{}) UpdateBuilder {
+	keys := make([]string, len(clauses))
+	i := 0
+	for key := range clauses {
+		keys[i] = key
+		i++
+	}
+	sort.Strings(keys)
+	for _, key := range keys {
+		val := clauses[key]
+		b = b.Set(key, val)
+	}
+	return b
+}
+
+// Where adds WHERE expressions to the query.
+//
+// See SelectBuilder.Where for more information.
+func (b UpdateBuilder) Where(pred interface{}, args ...interface{}) UpdateBuilder {
+	return builder.Append(b, "WhereParts", newWherePart(pred, args...)).(UpdateBuilder)
+}
+
+// OrderBy adds ORDER BY expressions to the query.
+func (b UpdateBuilder) OrderBy(orderBys ...string) UpdateBuilder {
+	return builder.Extend(b, "OrderBys", orderBys).(UpdateBuilder)
+}
+
+// Limit sets a LIMIT clause on the query.
+func (b UpdateBuilder) Limit(limit uint64) UpdateBuilder {
+	return builder.Set(b, "Limit", fmt.Sprintf("%d", limit)).(UpdateBuilder)
+}
+
+// Offset sets an OFFSET clause on the query.
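+//
+// A sketch of a complete update built with this builder (editor's example;
+// Eq is defined in expr.go):
+//
+//	sql, args, _ := Update("users").Set("name", "moe").Where(Eq{"id": 1}).ToSql()
+//	// sql  == "UPDATE users SET name = ? WHERE id = ?"
+//	// args == []interface{}{"moe", 1}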
+func (b UpdateBuilder) Offset(offset uint64) UpdateBuilder { + return builder.Set(b, "Offset", fmt.Sprintf("%d", offset)).(UpdateBuilder) +} + +// Suffix adds an expression to the end of the query +func (b UpdateBuilder) Suffix(sql string, args ...interface{}) UpdateBuilder { + return b.SuffixExpr(Expr(sql, args...)) +} + +// SuffixExpr adds an expression to the end of the query +func (b UpdateBuilder) SuffixExpr(expr Sqlizer) UpdateBuilder { + return builder.Append(b, "Suffixes", expr).(UpdateBuilder) +} diff --git a/vendor/github.com/Masterminds/squirrel/update_ctx.go b/vendor/github.com/Masterminds/squirrel/update_ctx.go new file mode 100644 index 0000000000..ad479f96f4 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/update_ctx.go @@ -0,0 +1,69 @@ +// +build go1.8 + +package squirrel + +import ( + "context" + "database/sql" + + "github.com/lann/builder" +) + +func (d *updateData) ExecContext(ctx context.Context) (sql.Result, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(ExecerContext) + if !ok { + return nil, NoContextSupport + } + return ExecContextWith(ctx, ctxRunner, d) +} + +func (d *updateData) QueryContext(ctx context.Context) (*sql.Rows, error) { + if d.RunWith == nil { + return nil, RunnerNotSet + } + ctxRunner, ok := d.RunWith.(QueryerContext) + if !ok { + return nil, NoContextSupport + } + return QueryContextWith(ctx, ctxRunner, d) +} + +func (d *updateData) QueryRowContext(ctx context.Context) RowScanner { + if d.RunWith == nil { + return &Row{err: RunnerNotSet} + } + queryRower, ok := d.RunWith.(QueryRowerContext) + if !ok { + if _, ok := d.RunWith.(QueryerContext); !ok { + return &Row{err: RunnerNotQueryRunner} + } + return &Row{err: NoContextSupport} + } + return QueryRowContextWith(ctx, queryRower, d) +} + +// ExecContext builds and ExecContexts the query with the Runner set by RunWith. +func (b UpdateBuilder) ExecContext(ctx context.Context) (sql.Result, error) { + data := builder.GetStruct(b).(updateData) + return data.ExecContext(ctx) +} + +// QueryContext builds and QueryContexts the query with the Runner set by RunWith. +func (b UpdateBuilder) QueryContext(ctx context.Context) (*sql.Rows, error) { + data := builder.GetStruct(b).(updateData) + return data.QueryContext(ctx) +} + +// QueryRowContext builds and QueryRowContexts the query with the Runner set by RunWith. +func (b UpdateBuilder) QueryRowContext(ctx context.Context) RowScanner { + data := builder.GetStruct(b).(updateData) + return data.QueryRowContext(ctx) +} + +// ScanContext is a shortcut for QueryRowContext().Scan. +func (b UpdateBuilder) ScanContext(ctx context.Context, dest ...interface{}) error { + return b.QueryRowContext(ctx).Scan(dest...) 
+} diff --git a/vendor/github.com/Masterminds/squirrel/where.go b/vendor/github.com/Masterminds/squirrel/where.go new file mode 100644 index 0000000000..976b63ace4 --- /dev/null +++ b/vendor/github.com/Masterminds/squirrel/where.go @@ -0,0 +1,30 @@ +package squirrel + +import ( + "fmt" +) + +type wherePart part + +func newWherePart(pred interface{}, args ...interface{}) Sqlizer { + return &wherePart{pred: pred, args: args} +} + +func (p wherePart) ToSql() (sql string, args []interface{}, err error) { + switch pred := p.pred.(type) { + case nil: + // no-op + case rawSqlizer: + return pred.toSqlRaw() + case Sqlizer: + return pred.ToSql() + case map[string]interface{}: + return Eq(pred).ToSql() + case string: + sql = pred + args = p.args + default: + err = fmt.Errorf("expected string-keyed map or string, not %T", pred) + } + return +} diff --git a/vendor/github.com/chai2010/gettext-go/LICENSE b/vendor/github.com/chai2010/gettext-go/LICENSE new file mode 100644 index 0000000000..8f39408250 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/LICENSE @@ -0,0 +1,27 @@ +Copyright 2013 ChaiShushan . All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/chai2010/gettext-go/gettext/caller.go b/vendor/github.com/chai2010/gettext-go/gettext/caller.go new file mode 100644 index 0000000000..e24aab3756 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/caller.go @@ -0,0 +1,39 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "regexp" + "runtime" +) + +var ( + reInit = regexp.MustCompile(`init·\d+$`) // main.init·1 + reClosure = regexp.MustCompile(`func·\d+$`) // main.func·001 +) + +// caller types: +// runtime.goexit +// runtime.main +// main.init +// main.main +// main.init·1 -> main.init +// main.func·001 -> main.func +// code.google.com/p/gettext-go/gettext.TestCallerName +// ... 
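+//
+// For example (editor's note), callerName(1) returns the fully qualified
+// name of the function that called callerName, normalizing init and
+// closure suffixes as above.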
+func callerName(skip int) string {
+	pc, _, _, ok := runtime.Caller(skip)
+	if !ok {
+		return ""
+	}
+	name := runtime.FuncForPC(pc).Name()
+	if reInit.MatchString(name) {
+		return reInit.ReplaceAllString(name, "init")
+	}
+	if reClosure.MatchString(name) {
+		return reClosure.ReplaceAllString(name, "func")
+	}
+	return name
+}
diff --git a/vendor/github.com/chai2010/gettext-go/gettext/doc.go b/vendor/github.com/chai2010/gettext-go/gettext/doc.go
new file mode 100644
index 0000000000..422bf2c6d7
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/gettext/doc.go
@@ -0,0 +1,66 @@
+// Copyright 2013 <chaishushan{AT}gmail.com>. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package gettext implements a basic GNU's gettext library.
+
+Example:
+	import (
+		"github.com/chai2010/gettext-go/gettext"
+	)
+
+	func main() {
+		gettext.SetLocale("zh_CN")
+		gettext.Textdomain("hello")
+
+		// gettext.BindTextdomain("hello", "local", nil)         // from local dir
+		// gettext.BindTextdomain("hello", "local.zip", nil)     // from local zip file
+		// gettext.BindTextdomain("hello", "local.zip", zipData) // from embedded zip data
+
+		gettext.BindTextdomain("hello", "local", nil)
+
+		// translate source text
+		fmt.Println(gettext.Gettext("Hello, world!"))
+		// Output: 你好, 世界!
+
+		// translate resource
+		fmt.Println(string(gettext.Getdata("poems.txt")))
+		// Output: ...
+	}
+
+Translate directory struct("../examples/local.zip"):
+
+	Root: "path" or "file.zip/zipBaseName"
+	 +-default                  # local: $(LC_MESSAGES) or $(LANG) or "default"
+	 |  +-LC_MESSAGES           # just for `gettext.Gettext`
+	 |  |  +-hello.mo           # $(Root)/$(local)/LC_MESSAGES/$(domain).mo
+	 |  |  \-hello.po           # $(Root)/$(local)/LC_MESSAGES/$(domain).mo
+	 |  |
+	 |  \-LC_RESOURCE           # just for `gettext.Getdata`
+	 |     +-hello              # domain map a dir in resource translate
+	 |        +-favicon.ico     # $(Root)/$(local)/LC_RESOURCE/$(domain)/$(filename)
+	 |        \-poems.txt
+	 |
+	 \-zh_CN                    # simple chinese translate
+	    +-LC_MESSAGES
+	    |  +-hello.mo           # try "$(domain).mo" first
+	    |  \-hello.po           # try "$(domain).po" second
+	    |
+	    \-LC_RESOURCE
+	       +-hello
+	          +-favicon.ico     # try "$(local)/$(domain)/file" first
+	          \-poems.txt       # try "default/$(domain)/file" second
+
+See:
+	http://en.wikipedia.org/wiki/Gettext
+	http://www.gnu.org/software/gettext/manual/html_node
+	http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html
+	http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html
+	http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
+	http://www.poedit.net/
+
+Please report bugs to <chaishushan{AT}gmail.com>.
+Thanks!
+*/
+package gettext
diff --git a/vendor/github.com/chai2010/gettext-go/gettext/domain.go b/vendor/github.com/chai2010/gettext-go/gettext/domain.go
new file mode 100644
index 0000000000..f860b27b6b
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/gettext/domain.go
@@ -0,0 +1,119 @@
+// Copyright 2013 ChaiShushan <chaishushan{AT}gmail.com>. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+ +package gettext + +import ( + "sync" +) + +type domainManager struct { + mutex sync.Mutex + locale string + domain string + domainMap map[string]*fileSystem + trTextMap map[string]*translator +} + +func newDomainManager() *domainManager { + return &domainManager{ + locale: DefaultLocale, + domainMap: make(map[string]*fileSystem), + trTextMap: make(map[string]*translator), + } +} + +func (p *domainManager) makeTrMapKey(domain, locale string) string { + return domain + "_$$$_" + locale +} + +func (p *domainManager) Bind(domain, path string, data []byte) (domains, paths []string) { + p.mutex.Lock() + defer p.mutex.Unlock() + + switch { + case domain != "" && path != "": // bind new domain + p.bindDomainTranslators(domain, path, data) + case domain != "" && path == "": // delete domain + p.deleteDomain(domain) + } + + // return all bind domain + for k, fs := range p.domainMap { + domains = append(domains, k) + paths = append(paths, fs.FsName) + } + return +} + +func (p *domainManager) SetLocale(locale string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + if locale != "" { + p.locale = locale + } + return p.locale +} + +func (p *domainManager) SetDomain(domain string) string { + p.mutex.Lock() + defer p.mutex.Unlock() + if domain != "" { + p.domain = domain + } + return p.domain +} + +func (p *domainManager) Getdata(name string) []byte { + return p.getdata(p.domain, name) +} + +func (p *domainManager) DGetdata(domain, name string) []byte { + return p.getdata(domain, name) +} + +func (p *domainManager) PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(p.domain, msgctxt, msgid, msgidPlural, n) +} + +func (p *domainManager) DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + p.mutex.Lock() + defer p.mutex.Unlock() + return p.gettext(domain, msgctxt, msgid, msgidPlural, n) +} + +func (p *domainManager) gettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + if p.locale == "" || p.domain == "" { + return msgid + } + if _, ok := p.domainMap[domain]; !ok { + return msgid + } + if f, ok := p.trTextMap[p.makeTrMapKey(domain, p.locale)]; ok { + return f.PNGettext(msgctxt, msgid, msgidPlural, n) + } + return msgid +} + +func (p *domainManager) getdata(domain, name string) []byte { + if p.locale == "" || p.domain == "" { + return nil + } + if _, ok := p.domainMap[domain]; !ok { + return nil + } + if fs, ok := p.domainMap[domain]; ok { + if data, err := fs.LoadResourceFile(domain, p.locale, name); err == nil { + return data + } + if p.locale != "default" { + if data, err := fs.LoadResourceFile(domain, "default", name); err == nil { + return data + } + } + } + return nil +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/domain_helper.go b/vendor/github.com/chai2010/gettext-go/gettext/domain_helper.go new file mode 100644 index 0000000000..8dce58e655 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/domain_helper.go @@ -0,0 +1,50 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package gettext + +import ( + "fmt" + "strings" +) + +func (p *domainManager) bindDomainTranslators(domain, path string, data []byte) { + if _, ok := p.domainMap[domain]; ok { + p.deleteDomain(domain) // delete old domain + } + fs := newFileSystem(path, data) + for locale, _ := range fs.LocaleMap { + trMapKey := p.makeTrMapKey(domain, locale) + if data, err := fs.LoadMessagesFile(domain, locale, ".mo"); err == nil { + p.trTextMap[trMapKey], _ = newMoTranslator( + fmt.Sprintf("%s_%s.mo", domain, locale), + data, + ) + continue + } + if data, err := fs.LoadMessagesFile(domain, locale, ".po"); err == nil { + p.trTextMap[trMapKey], _ = newPoTranslator( + fmt.Sprintf("%s_%s.po", domain, locale), + data, + ) + continue + } + p.trTextMap[p.makeTrMapKey(domain, locale)] = nilTranslator + } + p.domainMap[domain] = fs +} + +func (p *domainManager) deleteDomain(domain string) { + if _, ok := p.domainMap[domain]; !ok { + return + } + // delete all mo files + trMapKeyPrefix := p.makeTrMapKey(domain, "") + for k, _ := range p.trTextMap { + if strings.HasPrefix(k, trMapKeyPrefix) { + delete(p.trTextMap, k) + } + } + delete(p.domainMap, domain) +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/fs.go b/vendor/github.com/chai2010/gettext-go/gettext/fs.go new file mode 100644 index 0000000000..1c2e23c1d0 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/fs.go @@ -0,0 +1,187 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "archive/zip" + "bytes" + "fmt" + "io/ioutil" + "log" + "os" + "strings" +) + +type fileSystem struct { + FsName string + FsRoot string + FsZipData []byte + LocaleMap map[string]bool +} + +func newFileSystem(path string, data []byte) *fileSystem { + fs := &fileSystem{ + FsName: path, + FsZipData: data, + } + if err := fs.init(); err != nil { + log.Printf("gettext-go: invalid domain, err = %v", err) + } + return fs +} + +func (p *fileSystem) init() error { + zipName := func(name string) string { + if x := strings.LastIndexAny(name, `\/`); x != -1 { + name = name[x+1:] + } + name = strings.TrimSuffix(name, ".zip") + return name + } + + // zip data + if len(p.FsZipData) != 0 { + p.FsRoot = zipName(p.FsName) + p.LocaleMap = p.lsZip(p.FsZipData) + return nil + } + + // local dir or zip file + fi, err := os.Stat(p.FsName) + if err != nil { + return err + } + + // local dir + if fi.IsDir() { + p.FsRoot = p.FsName + p.LocaleMap = p.lsDir(p.FsName) + return nil + } + + // local zip file + p.FsZipData, err = ioutil.ReadFile(p.FsName) + if err != nil { + return err + } + p.FsRoot = zipName(p.FsName) + p.LocaleMap = p.lsZip(p.FsZipData) + return nil +} + +func (p *fileSystem) LoadMessagesFile(domain, local, ext string) ([]byte, error) { + if len(p.FsZipData) == 0 { + trName := p.makeMessagesFileName(domain, local, ext) + rcData, err := ioutil.ReadFile(trName) + if err != nil { + return nil, err + } + return rcData, nil + } else { + r, err := zip.NewReader(bytes.NewReader(p.FsZipData), int64(len(p.FsZipData))) + if err != nil { + return nil, err + } + + trName := p.makeMessagesFileName(domain, local, ext) + for _, f := range r.File { + if f.Name != trName { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + rcData, err := ioutil.ReadAll(rc) + rc.Close() + return rcData, err + } + return nil, fmt.Errorf("not found") + } +} + +func (p *fileSystem) LoadResourceFile(domain, local, 
name string) ([]byte, error) { + if len(p.FsZipData) == 0 { + rcName := p.makeResourceFileName(domain, local, name) + rcData, err := ioutil.ReadFile(rcName) + if err != nil { + return nil, err + } + return rcData, nil + } else { + r, err := zip.NewReader(bytes.NewReader(p.FsZipData), int64(len(p.FsZipData))) + if err != nil { + return nil, err + } + + rcName := p.makeResourceFileName(domain, local, name) + for _, f := range r.File { + if f.Name != rcName { + continue + } + rc, err := f.Open() + if err != nil { + return nil, err + } + rcData, err := ioutil.ReadAll(rc) + rc.Close() + return rcData, err + } + return nil, fmt.Errorf("not found") + } +} + +func (p *fileSystem) makeMessagesFileName(domain, local, ext string) string { + return fmt.Sprintf("%s/%s/LC_MESSAGES/%s%s", p.FsRoot, local, domain, ext) +} + +func (p *fileSystem) makeResourceFileName(domain, local, name string) string { + return fmt.Sprintf("%s/%s/LC_RESOURCE/%s/%s", p.FsRoot, local, domain, name) +} + +func (p *fileSystem) lsZip(data []byte) map[string]bool { + r, err := zip.NewReader(bytes.NewReader(data), int64(len(data))) + if err != nil { + return nil + } + ssMap := make(map[string]bool) + for _, f := range r.File { + if x := strings.Index(f.Name, "LC_MESSAGES"); x != -1 { + s := strings.TrimRight(f.Name[:x], `\/`) + if x = strings.LastIndexAny(s, `\/`); x != -1 { + s = s[x+1:] + } + if s != "" { + ssMap[s] = true + } + continue + } + if x := strings.Index(f.Name, "LC_RESOURCE"); x != -1 { + s := strings.TrimRight(f.Name[:x], `\/`) + if x = strings.LastIndexAny(s, `\/`); x != -1 { + s = s[x+1:] + } + if s != "" { + ssMap[s] = true + } + continue + } + } + return ssMap +} + +func (p *fileSystem) lsDir(path string) map[string]bool { + list, err := ioutil.ReadDir(path) + if err != nil { + return nil + } + ssMap := make(map[string]bool) + for _, dir := range list { + if dir.IsDir() { + ssMap[dir.Name()] = true + } + } + return ssMap +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/gettext.go b/vendor/github.com/chai2010/gettext-go/gettext/gettext.go new file mode 100644 index 0000000000..ca14065b22 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/gettext.go @@ -0,0 +1,184 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +var ( + defaultManager = newDomainManager() +) + +var ( + DefaultLocale = getDefaultLocale() // use $(LC_MESSAGES) or $(LANG) or "default" +) + +// SetLocale sets and queries the program's current locale. +// +// If the locale is not empty string, set the new local. +// +// If the locale is empty string, don't change anything. +// +// Returns is the current locale. +// +// Examples: +// SetLocale("") // get locale: return DefaultLocale +// SetLocale("zh_CN") // set locale: return zh_CN +// SetLocale("") // get locale: return zh_CN +func SetLocale(locale string) string { + return defaultManager.SetLocale(locale) +} + +// BindTextdomain sets and queries program's domains. +// +// If the domain and path are all not empty string, bind the new domain. +// If the domain already exists, return error. +// +// If the domain is not empty string, but the path is the empty string, +// delete the domain. +// If the domain don't exists, return error. +// +// If the domain and the path are all empty string, don't change anything. +// +// Returns is the all bind domains. 
+// +// Examples: +// BindTextdomain("poedit", "local", nil) // bind "poedit" domain +// BindTextdomain("", "", nil) // return all domains +// BindTextdomain("poedit", "", nil) // delete "poedit" domain +// BindTextdomain("", "", nil) // return all domains +// +// Use zip file: +// BindTextdomain("poedit", "local.zip", nil) // bind "poedit" domain +// BindTextdomain("poedit", "local.zip", zipData) // bind "poedit" domain +// +func BindTextdomain(domain, path string, zipData []byte) (domains, paths []string) { + return defaultManager.Bind(domain, path, zipData) +} + +// Textdomain sets and retrieves the current message domain. +// +// If the domain is not empty string, set the new domains. +// +// If the domain is empty string, don't change anything. +// +// Returns is the all used domains. +// +// Examples: +// Textdomain("poedit") // set domain: poedit +// Textdomain("") // get domain: return poedit +func Textdomain(domain string) string { + return defaultManager.SetDomain(domain) +} + +// Gettext attempt to translate a text string into the user's native language, +// by looking up the translation in a message catalog. +// +// It use the caller's function name as the msgctxt. +// +// Examples: +// func Foo() { +// msg := gettext.Gettext("Hello") // msgctxt is "some/package/name.Foo" +// } +func Gettext(msgid string) string { + return PGettext(callerName(2), msgid) +} + +// Getdata attempt to translate a resource file into the user's native language, +// by looking up the translation in a message catalog. +// +// Examples: +// func Foo() { +// Textdomain("hello") +// BindTextdomain("hello", "local.zip", nilOrZipData) +// poems := gettext.Getdata("poems.txt") +// } +func Getdata(name string) []byte { + return defaultManager.Getdata(name) +} + +// NGettext attempt to translate a text string into the user's native language, +// by looking up the appropriate plural form of the translation in a message +// catalog. +// +// It use the caller's function name as the msgctxt. +// +// Examples: +// func Foo() { +// msg := gettext.NGettext("%d people", "%d peoples", 2) +// } +func NGettext(msgid, msgidPlural string, n int) string { + return PNGettext(callerName(2), msgid, msgidPlural, n) +} + +// PGettext attempt to translate a text string into the user's native language, +// by looking up the translation in a message catalog. +// +// Examples: +// func Foo() { +// msg := gettext.PGettext("gettext-go.example", "Hello") // msgctxt is "gettext-go.example" +// } +func PGettext(msgctxt, msgid string) string { + return PNGettext(msgctxt, msgid, "", 0) +} + +// PNGettext attempt to translate a text string into the user's native language, +// by looking up the appropriate plural form of the translation in a message +// catalog. +// +// Examples: +// func Foo() { +// msg := gettext.PNGettext("gettext-go.example", "%d people", "%d peoples", 2) +// } +func PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + return defaultManager.PNGettext(msgctxt, msgid, msgidPlural, n) +} + +// DGettext like Gettext(), but looking up the message in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.DGettext("poedit", "Hello") +// } +func DGettext(domain, msgid string) string { + return DPGettext(domain, callerName(2), msgid) +} + +// DNGettext like NGettext(), but looking up the message in the specified domain. 
+// +// Examples: +// func Foo() { +// msg := gettext.DNGettext("poedit", "%d people", "%d peoples", 2) +// } +func DNGettext(domain, msgid, msgidPlural string, n int) string { + return DPNGettext(domain, callerName(2), msgid, msgidPlural, n) +} + +// DPGettext like PGettext(), but looking up the message in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.DPGettext("poedit", "gettext-go.example", "Hello") +// } +func DPGettext(domain, msgctxt, msgid string) string { + return DPNGettext(domain, msgctxt, msgid, "", 0) +} + +// DPNGettext like PNGettext(), but looking up the message in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.DPNGettext("poedit", "gettext-go.example", "%d people", "%d peoples", 2) +// } +func DPNGettext(domain, msgctxt, msgid, msgidPlural string, n int) string { + return defaultManager.DPNGettext(domain, msgctxt, msgid, msgidPlural, n) +} + +// DGetdata like Getdata(), but looking up the resource in the specified domain. +// +// Examples: +// func Foo() { +// msg := gettext.DGetdata("hello", "poems.txt") +// } +func DGetdata(domain, name string) []byte { + return defaultManager.DGetdata(domain, name) +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/local.go b/vendor/github.com/chai2010/gettext-go/gettext/local.go new file mode 100644 index 0000000000..179a392fe2 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/local.go @@ -0,0 +1,34 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gettext + +import ( + "os" + "strings" +) + +func getDefaultLocale() string { + if v := os.Getenv("LC_MESSAGES"); v != "" { + return simplifiedLocale(v) + } + if v := os.Getenv("LANG"); v != "" { + return simplifiedLocale(v) + } + return "default" +} + +func simplifiedLocale(lang string) string { + // en_US/en_US.UTF-8/zh_CN/zh_TW/el_GR@euro/... + if idx := strings.Index(lang, ":"); idx != -1 { + lang = lang[:idx] + } + if idx := strings.Index(lang, "@"); idx != -1 { + lang = lang[:idx] + } + if idx := strings.Index(lang, "."); idx != -1 { + lang = lang[:idx] + } + return strings.TrimSpace(lang) +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/mo/doc.go b/vendor/github.com/chai2010/gettext-go/gettext/mo/doc.go new file mode 100644 index 0000000000..9677680631 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/mo/doc.go @@ -0,0 +1,74 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mo provides support for reading and writing GNU MO files. + +Examples: + import ( + "github.com/chai2010/gettext-go/gettext/mo" + ) + + func main() { + moFile, err := mo.Load("test.mo") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%v", moFile) + } + +GNU MO file struct: + + byte + +------------------------------------------+ + 0 | magic number = 0x950412de | + | | + 4 | file format revision = 0 | + | | + 8 | number of strings | == N + | | + 12 | offset of table with original strings | == O + | | + 16 | offset of table with translation strings | == T + | | + 20 | size of hashing table | == S + | | + 24 | offset of hashing table | == H + | | + . . + . (possibly more entries later) . + . . + | | + O | length & offset 0th string ----------------.
+ O + 8 | length & offset 1st string ------------------. + ... ... | | + O + ((N-1)*8)| length & offset (N-1)th string | | | + | | | | + T | length & offset 0th translation ---------------. + T + 8 | length & offset 1st translation -----------------. + ... ... | | | | + T + ((N-1)*8)| length & offset (N-1)th translation | | | | | + | | | | | | + H | start hash table | | | | | + ... ... | | | | + H + S * 4 | end hash table | | | | | + | | | | | | + | NUL terminated 0th string <----------------' | | | + | | | | | + | NUL terminated 1st string <------------------' | | + | | | | + ... ... | | + | | | | + | NUL terminated 0th translation <---------------' | + | | | + | NUL terminated 1st translation <-----------------' + | | + ... ... + | | + +------------------------------------------+ + +The GNU MO file specification is at +http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html. +*/ +package mo diff --git a/vendor/github.com/chai2010/gettext-go/gettext/mo/encoder.go b/vendor/github.com/chai2010/gettext-go/gettext/mo/encoder.go new file mode 100644 index 0000000000..9b1c240b4f --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/mo/encoder.go @@ -0,0 +1,124 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "encoding/binary" + "sort" + "strings" +) + +type moHeader struct { + MagicNumber uint32 + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 +} + +type moStrPos struct { + Size uint32 // must keep fields order + Addr uint32 +} + +func encodeFile(f *File) []byte { + hdr := &moHeader{ + MagicNumber: MoMagicLittleEndian, + } + data := encodeData(hdr, f) + data = append(encodeHeader(hdr), data...) 
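+	// data now begins with the 28-byte MO header, followed by the packed
+	// msgid/msgstr strings and the two offset tables; the offsets stored in
+	// hdr were computed relative to the start of the header by encodeData.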
+ return data +} + +// encode data and init moHeader +func encodeData(hdr *moHeader, f *File) []byte { + msgList := []Message{f.MimeHeader.toMessage()} + for _, v := range f.Messages { + if len(v.MsgId) == 0 { + continue + } + if len(v.MsgStr) == 0 && len(v.MsgStrPlural) == 0 { + continue + } + msgList = append(msgList, v) + } + sort.Sort(byMessages(msgList)) + + var buf bytes.Buffer + var msgIdPosList = make([]moStrPos, len(msgList)) + var msgStrPosList = make([]moStrPos, len(msgList)) + for i, v := range msgList { + // write msgid + msgId := encodeMsgId(v) + msgIdPosList[i].Addr = uint32(buf.Len() + MoHeaderSize) + msgIdPosList[i].Size = uint32(len(msgId)) + buf.WriteString(msgId) + // write msgstr + msgStr := encodeMsgStr(v) + msgStrPosList[i].Addr = uint32(buf.Len() + MoHeaderSize) + msgStrPosList[i].Size = uint32(len(msgStr)) + buf.WriteString(msgStr) + } + + hdr.MsgIdOffset = uint32(buf.Len() + MoHeaderSize) + binary.Write(&buf, binary.LittleEndian, msgIdPosList) + hdr.MsgStrOffset = uint32(buf.Len() + MoHeaderSize) + binary.Write(&buf, binary.LittleEndian, msgStrPosList) + + hdr.MsgIdCount = uint32(len(msgList)) + return buf.Bytes() +} + +// must called after encodeData +func encodeHeader(hdr *moHeader) []byte { + var buf bytes.Buffer + binary.Write(&buf, binary.LittleEndian, hdr) + return buf.Bytes() +} + +func encodeMsgId(v Message) string { + if v.MsgContext != "" && v.MsgIdPlural != "" { + return v.MsgContext + EotSeparator + v.MsgId + NulSeparator + v.MsgIdPlural + } + if v.MsgContext != "" && v.MsgIdPlural == "" { + return v.MsgContext + EotSeparator + v.MsgId + } + if v.MsgContext == "" && v.MsgIdPlural != "" { + return v.MsgId + NulSeparator + v.MsgIdPlural + } + return v.MsgId +} + +func encodeMsgStr(v Message) string { + if v.MsgIdPlural != "" { + return strings.Join(v.MsgStrPlural, NulSeparator) + } + return v.MsgStr +} + +type byMessages []Message + +func (d byMessages) Len() int { + return len(d) +} +func (d byMessages) Less(i, j int) bool { + if a, b := d[i].MsgContext, d[j].MsgContext; a != b { + return a < b + } + if a, b := d[i].MsgId, d[j].MsgId; a != b { + return a < b + } + if a, b := d[i].MsgIdPlural, d[j].MsgIdPlural; a != b { + return a < b + } + return false +} +func (d byMessages) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/mo/file.go b/vendor/github.com/chai2010/gettext-go/gettext/mo/file.go new file mode 100644 index 0000000000..b49a77b42a --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/mo/file.go @@ -0,0 +1,193 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "encoding/binary" + "fmt" + "io/ioutil" + "strings" +) + +const ( + MoHeaderSize = 28 + MoMagicLittleEndian = 0x950412de + MoMagicBigEndian = 0xde120495 + + EotSeparator = "\x04" // msgctxt and msgid separator + NulSeparator = "\x00" // msgid and msgstr separator +) + +// File represents an MO File. +// +// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html +type File struct { + MagicNumber uint32 + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 + MimeHeader Header + Messages []Message +} + +// Load loads a named mo file. 
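+//
+// A minimal usage sketch (the path is a placeholder):
+//
+//	f, err := mo.Load("local/zh_CN/LC_MESSAGES/hello.mo")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(len(f.Messages)) // number of decoded entries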
+func Load(name string) (*File, error) { + data, err := ioutil.ReadFile(name) + if err != nil { + return nil, err + } + return LoadData(data) +} + +// LoadData loads mo file format data. +func LoadData(data []byte) (*File, error) { + r := bytes.NewReader(data) + + var magicNumber uint32 + if err := binary.Read(r, binary.LittleEndian, &magicNumber); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + var bo binary.ByteOrder + switch magicNumber { + case MoMagicLittleEndian: + bo = binary.LittleEndian + case MoMagicBigEndian: + bo = binary.BigEndian + default: + return nil, fmt.Errorf("gettext: %v", "invalid magic number") + } + + var header struct { + MajorVersion uint16 + MinorVersion uint16 + MsgIdCount uint32 + MsgIdOffset uint32 + MsgStrOffset uint32 + HashSize uint32 + HashOffset uint32 + } + if err := binary.Read(r, bo, &header); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if v := header.MajorVersion; v != 0 && v != 1 { + return nil, fmt.Errorf("gettext: %v", "invalid version number") + } + if v := header.MinorVersion; v != 0 && v != 1 { + return nil, fmt.Errorf("gettext: %v", "invalid version number") + } + + msgIdStart := make([]uint32, header.MsgIdCount) + msgIdLen := make([]uint32, header.MsgIdCount) + if _, err := r.Seek(int64(header.MsgIdOffset), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + for i := 0; i < int(header.MsgIdCount); i++ { + if err := binary.Read(r, bo, &msgIdLen[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if err := binary.Read(r, bo, &msgIdStart[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + } + + msgStrStart := make([]int32, header.MsgIdCount) + msgStrLen := make([]int32, header.MsgIdCount) + if _, err := r.Seek(int64(header.MsgStrOffset), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + for i := 0; i < int(header.MsgIdCount); i++ { + if err := binary.Read(r, bo, &msgStrLen[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + if err := binary.Read(r, bo, &msgStrStart[i]); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + } + + file := &File{ + MagicNumber: magicNumber, + MajorVersion: header.MajorVersion, + MinorVersion: header.MinorVersion, + MsgIdCount: header.MsgIdCount, + MsgIdOffset: header.MsgIdOffset, + MsgStrOffset: header.MsgStrOffset, + HashSize: header.HashSize, + HashOffset: header.HashOffset, + } + for i := 0; i < int(header.MsgIdCount); i++ { + if _, err := r.Seek(int64(msgIdStart[i]), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + msgIdData := make([]byte, msgIdLen[i]) + if _, err := r.Read(msgIdData); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + + if _, err := r.Seek(int64(msgStrStart[i]), 0); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + msgStrData := make([]byte, msgStrLen[i]) + if _, err := r.Read(msgStrData); err != nil { + return nil, fmt.Errorf("gettext: %v", err) + } + + if len(msgIdData) == 0 { + var msg = Message{ + MsgId: string(msgIdData), + MsgStr: string(msgStrData), + } + file.MimeHeader.fromMessage(&msg) + } else { + var msg = Message{ + MsgId: string(msgIdData), + MsgStr: string(msgStrData), + } + // Is this a context message? + if idx := strings.Index(msg.MsgId, EotSeparator); idx != -1 { + msg.MsgContext, msg.MsgId = msg.MsgId[:idx], msg.MsgId[idx+1:] + } + // Is this a plural message? 
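+			// (a plural entry packs msgid and msgid_plural into a single
+			// NUL-separated string; msgstr then holds every plural case,
+			// also NUL-joined, so it is split into MsgStrPlural below)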
+ if idx := strings.Index(msg.MsgId, NulSeparator); idx != -1 { + msg.MsgId, msg.MsgIdPlural = msg.MsgId[:idx], msg.MsgId[idx+1:] + msg.MsgStrPlural = strings.Split(msg.MsgStr, NulSeparator) + msg.MsgStr = "" + } + file.Messages = append(file.Messages, msg) + } + } + + return file, nil +} + +// Save saves a mo file. +func (f *File) Save(name string) error { + return ioutil.WriteFile(name, f.Data(), 0666) +} + +// Data returns the mo file format data. +func (f *File) Data() []byte { + return encodeFile(f) +} + +// String returns the po format file string. +func (f *File) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "# version: %d.%d\n", f.MajorVersion, f.MinorVersion) + fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String()) + fmt.Fprintf(&buf, "\n") + + for _, v := range f.Messages { + fmt.Fprintf(&buf, `msgid "%s"`+"\n", v.MsgId) + fmt.Fprintf(&buf, `msgstr "%s"`+"\n", v.MsgStr) + fmt.Fprintf(&buf, "\n") + } + + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/mo/header.go b/vendor/github.com/chai2010/gettext-go/gettext/mo/header.go new file mode 100644 index 0000000000..d8c7a5e3a3 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/mo/header.go @@ -0,0 +1,109 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "fmt" + "strings" +) + +// Header is the initial header entry, in which "SOME DESCRIPTIVE TITLE", "YEAR" +// and "FIRST AUTHOR , YEAR" ought to be replaced by sensible information. +// +// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry +type Header struct { + ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION + ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR + POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE + PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE + LastTranslator string // Last-Translator: FIRST AUTHOR + LanguageTeam string // Language-Team: golang-china + Language string // Language: zh_CN + MimeVersion string // MIME-Version: 1.0 + ContentType string // Content-Type: text/plain; charset=UTF-8 + ContentTransferEncoding string // Content-Transfer-Encoding: 8bit + PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ?
0 : 1; + XGenerator string // X-Generator: Poedit 1.5.5 + UnknowFields map[string]string +} + +func (p *Header) fromMessage(msg *Message) { + if msg.MsgId != "" || msg.MsgStr == "" { + return + } + lines := strings.Split(msg.MsgStr, "\n") + for i := 0; i < len(lines); i++ { + idx := strings.Index(lines[i], ":") + if idx < 0 { + continue + } + key := strings.TrimSpace(lines[i][:idx]) + val := strings.TrimSpace(lines[i][idx+1:]) + switch strings.ToUpper(key) { + case strings.ToUpper("Project-Id-Version"): + p.ProjectIdVersion = val + case strings.ToUpper("Report-Msgid-Bugs-To"): + p.ReportMsgidBugsTo = val + case strings.ToUpper("POT-Creation-Date"): + p.POTCreationDate = val + case strings.ToUpper("PO-Revision-Date"): + p.PORevisionDate = val + case strings.ToUpper("Last-Translator"): + p.LastTranslator = val + case strings.ToUpper("Language-Team"): + p.LanguageTeam = val + case strings.ToUpper("Language"): + p.Language = val + case strings.ToUpper("MIME-Version"): + p.MimeVersion = val + case strings.ToUpper("Content-Type"): + p.ContentType = val + case strings.ToUpper("Content-Transfer-Encoding"): + p.ContentTransferEncoding = val + case strings.ToUpper("Plural-Forms"): + p.PluralForms = val + case strings.ToUpper("X-Generator"): + p.XGenerator = val + default: + if p.UnknowFields == nil { + p.UnknowFields = make(map[string]string) + } + p.UnknowFields[key] = val + } + } +} + +func (p *Header) toMessage() Message { + return Message{ + MsgStr: p.String(), + } +} + +// String returns the po format header string. +func (p Header) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, `msgid ""`+"\n") + fmt.Fprintf(&buf, `msgstr ""`+"\n") + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language) + if p.MimeVersion != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion) + } + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding) + if p.XGenerator != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator) + } + for k, v := range p.UnknowFields { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/mo/message.go b/vendor/github.com/chai2010/gettext-go/gettext/mo/message.go new file mode 100644 index 0000000000..91ad79bece --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/mo/message.go @@ -0,0 +1,39 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "fmt" +) + +// A MO file is made up of many entries, +// each entry holding the relation between an original untranslated string +// and its corresponding translation. 
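+//
+// On disk a context entry is stored as "msgctxt\x04msgid" (EotSeparator) and
+// a plural entry as "msgid\x00msgid_plural" (NulSeparator), with the plural
+// translations NUL-joined in the msgstr slot.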
+// +// See http://www.gnu.org/software/gettext/manual/html_node/MO-Files.html +type Message struct { + MsgContext string // msgctxt context + MsgId string // msgid untranslated-string + MsgIdPlural string // msgid_plural untranslated-string-plural + MsgStr string // msgstr translated-string + MsgStrPlural []string // msgstr[0] translated-string-case-0 +} + +// String returns the po format entry string. +func (p Message) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId)) + if p.MsgIdPlural != "" { + fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural)) + } + if p.MsgStr != "" { + fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr)) + } + for i := 0; i < len(p.MsgStrPlural); i++ { + fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i])) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/mo/util.go b/vendor/github.com/chai2010/gettext-go/gettext/mo/util.go new file mode 100644 index 0000000000..3804511053 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/mo/util.go @@ -0,0 +1,110 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mo + +import ( + "bytes" + "strings" +) + +func decodePoString(text string) string { + lines := strings.Split(text, "\n") + for i := 0; i < len(lines); i++ { + left := strings.Index(lines[i], `"`) + right := strings.LastIndex(lines[i], `"`) + if left < 0 || right < 0 || left == right { + lines[i] = "" + continue + } + line := lines[i][left+1 : right] + data := make([]byte, 0, len(line)) + for i := 0; i < len(line); i++ { + if line[i] != '\\' { + data = append(data, line[i]) + continue + } + if i+1 >= len(line) { + break + } + switch line[i+1] { + case 'n': // \\n -> \n + data = append(data, '\n') + i++ + case 't': // \\t -> \n + data = append(data, '\t') + i++ + case '\\': // \\\ -> ? 
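+			// an escaped backslash ("\\") decodes to a single backslash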
+ data = append(data, '\\') + i++ + } + } + lines[i] = string(data) + } + return strings.Join(lines, "") +} + +func encodePoString(text string) string { + var buf bytes.Buffer + lines := strings.Split(text, "\n") + for i := 0; i < len(lines); i++ { + if lines[i] == "" { + if i != len(lines)-1 { + buf.WriteString(`"\n"` + "\n") + } + continue + } + buf.WriteRune('"') + for _, r := range lines[i] { + switch r { + case '\\': + buf.WriteString(`\\`) + case '"': + buf.WriteString(`\"`) + case '\n': + buf.WriteString(`\n`) + case '\t': + buf.WriteString(`\t`) + default: + buf.WriteRune(r) + } + } + buf.WriteString(`\n"` + "\n") + } + return buf.String() +} + +func encodeCommentPoString(text string) string { + var buf bytes.Buffer + lines := strings.Split(text, "\n") + if len(lines) > 1 { + buf.WriteString(`""` + "\n") + } + for i := 0; i < len(lines); i++ { + if len(lines) > 0 { + buf.WriteString("#| ") + } + buf.WriteRune('"') + for _, r := range lines[i] { + switch r { + case '\\': + buf.WriteString(`\\`) + case '"': + buf.WriteString(`\"`) + case '\n': + buf.WriteString(`\n`) + case '\t': + buf.WriteString(`\t`) + default: + buf.WriteRune(r) + } + } + if i < len(lines)-1 { + buf.WriteString(`\n"` + "\n") + } else { + buf.WriteString(`"`) + } + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/plural/doc.go b/vendor/github.com/chai2010/gettext-go/gettext/plural/doc.go new file mode 100644 index 0000000000..5641e2c3e7 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/plural/doc.go @@ -0,0 +1,36 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package plural provides standard plural formulas. + +Examples: + import ( + "code.google.com/p/gettext-go/gettext/plural" + ) + + func main() { + enFormula := plural.Formula("en_US") + xxFormula := plural.Formula("zh_CN") + + fmt.Printf("%s: %d\n", "en", enFormula(0)) + fmt.Printf("%s: %d\n", "en", enFormula(1)) + fmt.Printf("%s: %d\n", "en", enFormula(2)) + fmt.Printf("%s: %d\n", "??", xxFormula(0)) + fmt.Printf("%s: %d\n", "??", xxFormula(1)) + fmt.Printf("%s: %d\n", "??", xxFormula(2)) + fmt.Printf("%s: %d\n", "??", xxFormula(9)) + // Output: + // en: 0 + // en: 0 + // en: 1 + // ??: 0 + // ??: 0 + // ??: 1 + // ??: 8 + } + +See http://www.gnu.org/software/gettext/manual/html_node/Plural-forms.html +*/ +package plural diff --git a/vendor/github.com/chai2010/gettext-go/gettext/plural/formula.go b/vendor/github.com/chai2010/gettext-go/gettext/plural/formula.go new file mode 100644 index 0000000000..679a1cd50d --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/plural/formula.go @@ -0,0 +1,181 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +import ( + "strings" +) + +// Formula provides the language's standard plural formula. 
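+// The returned function maps a count n to a plural-form index. A sketch for
+// English, whose table entry is "nplurals=2; plural=(n != 1);":
+//
+//	f := Formula("en_US")
+//	f(1) // == 0, the singular form
+//	f(2) // == 1, the plural form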
+func Formula(lang string) func(n int) int { + if idx := index(lang); idx != -1 { + return formulaTable[fmtForms(FormsTable[idx].Value)] + } + if idx := index("??"); idx != -1 { + return formulaTable[fmtForms(FormsTable[idx].Value)] + } + return func(n int) int { + return n + } +} + +func index(lang string) int { + for i := 0; i < len(FormsTable); i++ { + if strings.HasPrefix(lang, FormsTable[i].Lang) { + return i + } + } + return -1 +} + +func fmtForms(forms string) string { + forms = strings.TrimSpace(forms) + forms = strings.Replace(forms, " ", "", -1) + return forms +} + +var formulaTable = map[string]func(n int) int{ + fmtForms("nplurals=n; plural=n-1;"): func(n int) int { + if n > 0 { + return n - 1 + } + return 0 + }, + fmtForms("nplurals=1; plural=0;"): func(n int) int { + return 0 + }, + fmtForms("nplurals=2; plural=(n != 1);"): func(n int) int { + if n <= 1 { + return 0 + } + return 1 + }, + fmtForms("nplurals=2; plural=(n > 1);"): func(n int) int { + if n <= 1 { + return 0 + } + return 1 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n != 0 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n == 2 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n == 0 || (n%100 > 0 && n%100 < 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n%10 == 1 && n%100 != 11 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n >= 2 && n <= 4 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 
1 : 2;"): func(n int) int { + if n == 1 { + return 0 + } + if n >= 2 && n <= 4 { + return 1 + } + return 2 + }, + fmtForms("nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"): func(n int) int { + if n == 1 { + return 0 + } + if n%10 >= 2 && n%10 <= 4 && (n%100 < 10 || n%100 >= 20) { + return 1 + } + return 2 + }, + fmtForms("nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"): func(n int) int { + if n%100 == 1 { + return 0 + } + if n%100 == 2 { + return 1 + } + if n%100 == 3 || n%100 == 4 { + return 2 + } + return 3 + }, +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/plural/table.go b/vendor/github.com/chai2010/gettext-go/gettext/plural/table.go new file mode 100644 index 0000000000..cdc50d2110 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/plural/table.go @@ -0,0 +1,55 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package plural + +// FormsTable are standard hard-coded plural rules. +// The application developers and the translators need to understand them. +// +// See GNU's gettext library source code: gettext/gettext-tools/src/plural-table.c +var FormsTable = []struct { + Lang string + Language string + Value string +}{ + {"??", "Unknown", "nplurals=1; plural=0;"}, + {"ja", "Japanese", "nplurals=1; plural=0;"}, + {"vi", "Vietnamese", "nplurals=1; plural=0;"}, + {"ko", "Korean", "nplurals=1; plural=0;"}, + {"en", "English", "nplurals=2; plural=(n != 1);"}, + {"de", "German", "nplurals=2; plural=(n != 1);"}, + {"nl", "Dutch", "nplurals=2; plural=(n != 1);"}, + {"sv", "Swedish", "nplurals=2; plural=(n != 1);"}, + {"da", "Danish", "nplurals=2; plural=(n != 1);"}, + {"no", "Norwegian", "nplurals=2; plural=(n != 1);"}, + {"nb", "Norwegian Bokmal", "nplurals=2; plural=(n != 1);"}, + {"nn", "Norwegian Nynorsk", "nplurals=2; plural=(n != 1);"}, + {"fo", "Faroese", "nplurals=2; plural=(n != 1);"}, + {"es", "Spanish", "nplurals=2; plural=(n != 1);"}, + {"pt", "Portuguese", "nplurals=2; plural=(n != 1);"}, + {"it", "Italian", "nplurals=2; plural=(n != 1);"}, + {"bg", "Bulgarian", "nplurals=2; plural=(n != 1);"}, + {"el", "Greek", "nplurals=2; plural=(n != 1);"}, + {"fi", "Finnish", "nplurals=2; plural=(n != 1);"}, + {"et", "Estonian", "nplurals=2; plural=(n != 1);"}, + {"he", "Hebrew", "nplurals=2; plural=(n != 1);"}, + {"eo", "Esperanto", "nplurals=2; plural=(n != 1);"}, + {"hu", "Hungarian", "nplurals=2; plural=(n != 1);"}, + {"tr", "Turkish", "nplurals=2; plural=(n != 1);"}, + {"pt_BR", "Brazilian", "nplurals=2; plural=(n > 1);"}, + {"fr", "French", "nplurals=2; plural=(n > 1);"}, + {"lv", "Latvian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n != 0 ? 1 : 2);"}, + {"ga", "Irish", "nplurals=3; plural=n==1 ? 0 : n==2 ? 1 : 2;"}, + {"ro", "Romanian", "nplurals=3; plural=n==1 ? 0 : (n==0 || (n%100 > 0 && n%100 < 20)) ? 1 : 2;"}, + {"lt", "Lithuanian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"ru", "Russian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"uk", "Ukrainian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"be", "Belarusian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 
1 : 2);"}, + {"sr", "Serbian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"hr", "Croatian", "nplurals=3; plural=(n%10==1 && n%100!=11 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"cs", "Czech", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"}, + {"sk", "Slovak", "nplurals=3; plural=(n==1) ? 0 : (n>=2 && n<=4) ? 1 : 2;"}, + {"pl", "Polish", "nplurals=3; plural=(n==1 ? 0 : n%10>=2 && n%10<=4 && (n%100<10 || n%100>=20) ? 1 : 2);"}, + {"sl", "Slovenian", "nplurals=4; plural=(n%100==1 ? 0 : n%100==2 ? 1 : n%100==3 || n%100==4 ? 2 : 3);"}, +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/po/comment.go b/vendor/github.com/chai2010/gettext-go/gettext/po/comment.go new file mode 100644 index 0000000000..d4abe7c106 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/po/comment.go @@ -0,0 +1,270 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// Comment represents every message's comments. +type Comment struct { + StartLine int // comment start line + TranslatorComment string // # translator-comments // TrimSpace + ExtractedComment string // #. extracted-comments + ReferenceFile []string // #: src/msgcmp.c:338 src/po-lex.c:699 + ReferenceLine []int // #: src/msgcmp.c:338 src/po-lex.c:699 + Flags []string // #, fuzzy,c-format,range:0..10 + PrevMsgContext string // #| msgctxt previous-context + PrevMsgId string // #| msgid previous-untranslated-string +} + +func (p *Comment) less(q *Comment) bool { + if p.StartLine != 0 || q.StartLine != 0 { + return p.StartLine < q.StartLine + } + if a, b := len(p.ReferenceFile), len(q.ReferenceFile); a != b { + return a < b + } + for i := 0; i < len(p.ReferenceFile); i++ { + if a, b := p.ReferenceFile[i], q.ReferenceFile[i]; a != b { + return a < b + } + if a, b := p.ReferenceLine[i], q.ReferenceLine[i]; a != b { + return a < b + } + } + return false +} + +func (p *Comment) readPoComment(r *lineReader) (err error) { + *p = Comment{} + if err = r.skipBlankLine(); err != nil { + return err + } + defer func(oldPos int) { + newPos := r.currentPos() + if newPos != oldPos && err == io.EOF { + err = nil + } + }(r.currentPos()) + + p.StartLine = r.currentPos() + 1 + for { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if len(s) == 0 || s[0] != '#' { + return + } + + if err = p.readTranslatorComment(r); err != nil { + return + } + if err = p.readExtractedComment(r); err != nil { + return + } + if err = p.readReferenceComment(r); err != nil { + return + } + if err = p.readFlagsComment(r); err != nil { + return + } + if err = p.readPrevMsgContext(r); err != nil { + return + } + if err = p.readPrevMsgId(r); err != nil { + return + } + } +} + +func (p *Comment) readTranslatorComment(r *lineReader) (err error) { + const prefix = "# " // .,:| + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < 1 || s[0] != '#' { + r.unreadLine() + return nil + } + if len(s) >= 2 { + switch s[1] { + case '.', ',', ':', '|': + r.unreadLine() + return nil + } + } + if p.TranslatorComment != "" { + p.TranslatorComment += "\n" + } + p.TranslatorComment += strings.TrimSpace(s[1:]) + } +} + +func (p *Comment) readExtractedComment(r *lineReader) (err error) { + const prefix = "#." 
+ for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + if p.ExtractedComment != "" { + p.ExtractedComment += "\n" + } + p.ExtractedComment += strings.TrimSpace(s[len(prefix):]) + } +} + +func (p *Comment) readReferenceComment(r *lineReader) (err error) { + const prefix = "#:" + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + ss := strings.Split(strings.TrimSpace(s[len(prefix):]), " ") + for i := 0; i < len(ss); i++ { + idx := strings.Index(ss[i], ":") + if idx <= 0 { + continue + } + name := strings.TrimSpace(ss[i][:idx]) + line, _ := strconv.Atoi(strings.TrimSpace(ss[i][idx+1:])) + p.ReferenceFile = append(p.ReferenceFile, name) + p.ReferenceLine = append(p.ReferenceLine, line) + } + } +} + +func (p *Comment) readFlagsComment(r *lineReader) (err error) { + const prefix = "#," + for { + var s string + if s, _, err = r.readLine(); err != nil { + return err + } + if len(s) < len(prefix) || s[:len(prefix)] != prefix { + r.unreadLine() + return nil + } + ss := strings.Split(strings.TrimSpace(s[len(prefix):]), ",") + for i := 0; i < len(ss); i++ { + p.Flags = append(p.Flags, strings.TrimSpace(ss[i])) + } + } +} + +func (p *Comment) readPrevMsgContext(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !rePrevMsgContextComments.MatchString(s) { + return + } + p.PrevMsgContext, err = p.readString(r) + return +} + +func (p *Comment) readPrevMsgId(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !rePrevMsgIdComments.MatchString(s) { + return + } + p.PrevMsgId, err = p.readString(r) + return +} + +func (p *Comment) readString(r *lineReader) (msg string, err error) { + var s string + if s, _, err = r.readLine(); err != nil { + return + } + msg += decodePoString(s) + for { + if s, _, err = r.readLine(); err != nil { + return + } + if !reStringLineComments.MatchString(s) { + r.unreadLine() + break + } + msg += decodePoString(s) + } + return +} + +// GetFuzzy gets the fuzzy flag. +func (p *Comment) GetFuzzy() bool { + for _, s := range p.Flags { + if s == "fuzzy" { + return true + } + } + return false +} + +// SetFuzzy sets the fuzzy flag. +func (p *Comment) SetFuzzy(fuzzy bool) { + // +} + +// String returns the po format comment string. +func (p Comment) String() string { + var buf bytes.Buffer + if p.TranslatorComment != "" { + ss := strings.Split(p.TranslatorComment, "\n") + for i := 0; i < len(ss); i++ { + fmt.Fprintf(&buf, "# %s\n", ss[i]) + } + } + if p.ExtractedComment != "" { + ss := strings.Split(p.ExtractedComment, "\n") + for i := 0; i < len(ss); i++ { + fmt.Fprintf(&buf, "#. 
%s\n", ss[i]) + } + } + if a, b := len(p.ReferenceFile), len(p.ReferenceLine); a != 0 && a == b { + fmt.Fprintf(&buf, "#:") + for i := 0; i < len(p.ReferenceFile); i++ { + fmt.Fprintf(&buf, " %s:%d", p.ReferenceFile[i], p.ReferenceLine[i]) + } + fmt.Fprintf(&buf, "\n") + } + if len(p.Flags) != 0 { + fmt.Fprintf(&buf, "#, %s", p.Flags[0]) + for i := 1; i < len(p.Flags); i++ { + fmt.Fprintf(&buf, ", %s", p.Flags[i]) + } + fmt.Fprintf(&buf, "\n") + } + if p.PrevMsgContext != "" { + s := encodeCommentPoString(p.PrevMsgContext) + fmt.Fprintf(&buf, "#| msgctxt %s\n", s) + } + if p.PrevMsgId != "" { + s := encodeCommentPoString(p.PrevMsgId) + fmt.Fprintf(&buf, "#| msgid %s\n", s) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/po/doc.go b/vendor/github.com/chai2010/gettext-go/gettext/po/doc.go new file mode 100644 index 0000000000..12bac8f2a2 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/po/doc.go @@ -0,0 +1,24 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package po provides support for reading and writing GNU PO file. + +Examples: + import ( + "github.com/chai2010/gettext-go/gettext/po" + ) + + func main() { + poFile, err := po.Load("test.po") + if err != nil { + log.Fatal(err) + } + fmt.Printf("%v", poFile) + } + +The GNU PO file specification is at +http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html. +*/ +package po diff --git a/vendor/github.com/chai2010/gettext-go/gettext/po/file.go b/vendor/github.com/chai2010/gettext-go/gettext/po/file.go new file mode 100644 index 0000000000..a9b7abf949 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/po/file.go @@ -0,0 +1,75 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "sort" +) + +// File represents an PO File. +// +// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html +type File struct { + MimeHeader Header + Messages []Message +} + +// Load loads a named po file. +func Load(name string) (*File, error) { + data, err := ioutil.ReadFile(name) + if err != nil { + return nil, err + } + return LoadData(data) +} + +// LoadData loads po file format data. +func LoadData(data []byte) (*File, error) { + r := newLineReader(string(data)) + var file File + for { + var msg Message + if err := msg.readPoEntry(r); err != nil { + if err == io.EOF { + return &file, nil + } + return nil, err + } + if msg.MsgId == "" { + file.MimeHeader.parseHeader(&msg) + continue + } + file.Messages = append(file.Messages, msg) + } +} + +// Save saves a po file. +func (f *File) Save(name string) error { + return ioutil.WriteFile(name, []byte(f.String()), 0666) +} + +// Save returns a po file format data. +func (f *File) Data() []byte { + // sort the massge as ReferenceFile/ReferenceLine field + var messages []Message + messages = append(messages, f.Messages...) + sort.Sort(byMessages(messages)) + + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s\n", f.MimeHeader.String()) + for i := 0; i < len(messages); i++ { + fmt.Fprintf(&buf, "%s\n", messages[i].String()) + } + return buf.Bytes() +} + +// String returns the po format file string. 
+func (f *File) String() string { + return string(f.Data()) +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/po/header.go b/vendor/github.com/chai2010/gettext-go/gettext/po/header.go new file mode 100644 index 0000000000..a9b5b6671b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/po/header.go @@ -0,0 +1,106 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "strings" +) + +// Header is the initial comments "SOME DESCRIPTIVE TITLE", "YEAR" +// and "FIRST AUTHOR , YEAR" ought to be replaced by sensible information. +// +// See http://www.gnu.org/software/gettext/manual/html_node/Header-Entry.html#Header-Entry +type Header struct { + Comment // Header Comments + ProjectIdVersion string // Project-Id-Version: PACKAGE VERSION + ReportMsgidBugsTo string // Report-Msgid-Bugs-To: FIRST AUTHOR + POTCreationDate string // POT-Creation-Date: YEAR-MO-DA HO:MI+ZONE + PORevisionDate string // PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE + LastTranslator string // Last-Translator: FIRST AUTHOR + LanguageTeam string // Language-Team: golang-china + Language string // Language: zh_CN + MimeVersion string // MIME-Version: 1.0 + ContentType string // Content-Type: text/plain; charset=UTF-8 + ContentTransferEncoding string // Content-Transfer-Encoding: 8bit + PluralForms string // Plural-Forms: nplurals=2; plural=n == 1 ? 0 : 1; + XGenerator string // X-Generator: Poedit 1.5.5 + UnknowFields map[string]string +} + +func (p *Header) parseHeader(msg *Message) { + if msg.MsgId != "" || msg.MsgStr == "" { + return + } + lines := strings.Split(msg.MsgStr, "\n") + for i := 0; i < len(lines); i++ { + idx := strings.Index(lines[i], ":") + if idx < 0 { + continue + } + key := strings.TrimSpace(lines[i][:idx]) + val := strings.TrimSpace(lines[i][idx+1:]) + switch strings.ToUpper(key) { + case strings.ToUpper("Project-Id-Version"): + p.ProjectIdVersion = val + case strings.ToUpper("Report-Msgid-Bugs-To"): + p.ReportMsgidBugsTo = val + case strings.ToUpper("POT-Creation-Date"): + p.POTCreationDate = val + case strings.ToUpper("PO-Revision-Date"): + p.PORevisionDate = val + case strings.ToUpper("Last-Translator"): + p.LastTranslator = val + case strings.ToUpper("Language-Team"): + p.LanguageTeam = val + case strings.ToUpper("Language"): + p.Language = val + case strings.ToUpper("MIME-Version"): + p.MimeVersion = val + case strings.ToUpper("Content-Type"): + p.ContentType = val + case strings.ToUpper("Content-Transfer-Encoding"): + p.ContentTransferEncoding = val + case strings.ToUpper("Plural-Forms"): + p.PluralForms = val + case strings.ToUpper("X-Generator"): + p.XGenerator = val + default: + if p.UnknowFields == nil { + p.UnknowFields = make(map[string]string) + } + p.UnknowFields[key] = val + } + } + p.Comment = msg.Comment +} + +// String returns the po format header string. 
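+// The output is the conventional PO header entry, for example (field values
+// are placeholders):
+//
+//	msgid ""
+//	msgstr ""
+//	"Project-Id-Version: hello 1.0\n"
+//	"Content-Type: text/plain; charset=UTF-8\n"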
+func (p Header) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s", p.Comment.String()) + fmt.Fprintf(&buf, `msgid ""`+"\n") + fmt.Fprintf(&buf, `msgstr ""`+"\n") + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Project-Id-Version", p.ProjectIdVersion) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Report-Msgid-Bugs-To", p.ReportMsgidBugsTo) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "POT-Creation-Date", p.POTCreationDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "PO-Revision-Date", p.PORevisionDate) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Last-Translator", p.LastTranslator) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language-Team", p.LanguageTeam) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Language", p.Language) + if p.MimeVersion != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "MIME-Version", p.MimeVersion) + } + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Type", p.ContentType) + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "Content-Transfer-Encoding", p.ContentTransferEncoding) + if p.XGenerator != "" { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", "X-Generator", p.XGenerator) + } + for k, v := range p.UnknowFields { + fmt.Fprintf(&buf, `"%s: %s\n"`+"\n", k, v) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/po/line_reader.go b/vendor/github.com/chai2010/gettext-go/gettext/po/line_reader.go new file mode 100644 index 0000000000..8597273a2b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/po/line_reader.go @@ -0,0 +1,62 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "io" + "strings" +) + +type lineReader struct { + lines []string + pos int +} + +func newLineReader(data string) *lineReader { + data = strings.Replace(data, "\r", "", -1) + lines := strings.Split(data, "\n") + return &lineReader{lines: lines} +} + +func (r *lineReader) skipBlankLine() error { + for ; r.pos < len(r.lines); r.pos++ { + if strings.TrimSpace(r.lines[r.pos]) != "" { + break + } + } + if r.pos >= len(r.lines) { + return io.EOF + } + return nil +} + +func (r *lineReader) currentPos() int { + return r.pos +} + +func (r *lineReader) currentLine() (s string, pos int, err error) { + if r.pos >= len(r.lines) { + err = io.EOF + return + } + s, pos = r.lines[r.pos], r.pos + return +} + +func (r *lineReader) readLine() (s string, pos int, err error) { + if r.pos >= len(r.lines) { + err = io.EOF + return + } + s, pos = r.lines[r.pos], r.pos + r.pos++ + return +} + +func (r *lineReader) unreadLine() { + if r.pos >= 0 { + r.pos-- + } +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/po/message.go b/vendor/github.com/chai2010/gettext-go/gettext/po/message.go new file mode 100644 index 0000000000..a2cf2512c7 --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/po/message.go @@ -0,0 +1,189 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "bytes" + "fmt" + "io" + "strconv" + "strings" +) + +// A PO file is made up of many entries, +// each entry holding the relation between an original untranslated string +// and its corresponding translation. 
+// +// See http://www.gnu.org/software/gettext/manual/html_node/PO-Files.html +type Message struct { + Comment // Coments + MsgContext string // msgctxt context + MsgId string // msgid untranslated-string + MsgIdPlural string // msgid_plural untranslated-string-plural + MsgStr string // msgstr translated-string + MsgStrPlural []string // msgstr[0] translated-string-case-0 +} + +type byMessages []Message + +func (d byMessages) Len() int { + return len(d) +} +func (d byMessages) Less(i, j int) bool { + if d[i].Comment.less(&d[j].Comment) { + return true + } + if a, b := d[i].MsgContext, d[j].MsgContext; a != b { + return a < b + } + if a, b := d[i].MsgId, d[j].MsgId; a != b { + return a < b + } + if a, b := d[i].MsgIdPlural, d[j].MsgIdPlural; a != b { + return a < b + } + return false +} +func (d byMessages) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} + +func (p *Message) readPoEntry(r *lineReader) (err error) { + *p = Message{} + if err = r.skipBlankLine(); err != nil { + return + } + defer func(oldPos int) { + newPos := r.currentPos() + if newPos != oldPos && err == io.EOF { + err = nil + } + }(r.currentPos()) + + if err = p.Comment.readPoComment(r); err != nil { + return + } + for { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + + if p.isInvalidLine(s) { + err = fmt.Errorf("gettext: line %d, %v", r.currentPos(), "invalid line") + return + } + if reComment.MatchString(s) || reBlankLine.MatchString(s) { + return + } + + if err = p.readMsgContext(r); err != nil { + return + } + if err = p.readMsgId(r); err != nil { + return + } + if err = p.readMsgIdPlural(r); err != nil { + return + } + if err = p.readMsgStrOrPlural(r); err != nil { + return + } + } +} + +func (p *Message) readMsgContext(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !reMsgContext.MatchString(s) { + return + } + p.MsgContext, err = p.readString(r) + return +} + +func (p *Message) readMsgId(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !reMsgId.MatchString(s) { + return + } + p.MsgId, err = p.readString(r) + return +} + +func (p *Message) readMsgIdPlural(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !reMsgIdPlural.MatchString(s) { + return + } + p.MsgIdPlural, err = p.readString(r) + return nil +} + +func (p *Message) readMsgStrOrPlural(r *lineReader) (err error) { + var s string + if s, _, err = r.currentLine(); err != nil { + return + } + if !reMsgStr.MatchString(s) && !reMsgStrPlural.MatchString(s) { + return + } + if reMsgStrPlural.MatchString(s) { + left, right := strings.Index(s, `[`), strings.LastIndex(s, `]`) + idx, _ := strconv.Atoi(s[left+1 : right]) + s, err = p.readString(r) + if n := len(p.MsgStrPlural); (idx + 1) > n { + p.MsgStrPlural = append(p.MsgStrPlural, make([]string, (idx+1)-n)...) + } + p.MsgStrPlural[idx] = s + } else { + p.MsgStr, err = p.readString(r) + } + return nil +} + +func (p *Message) readString(r *lineReader) (msg string, err error) { + var s string + if s, _, err = r.readLine(); err != nil { + return + } + msg += decodePoString(s) + for { + if s, _, err = r.readLine(); err != nil { + return + } + if !reStringLine.MatchString(s) { + r.unreadLine() + break + } + msg += decodePoString(s) + } + return +} + +// String returns the po format entry string. 
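+// A plural entry renders as, for example (the translations are placeholders):
+//
+//	msgid "%d file"
+//	msgid_plural "%d files"
+//	msgstr[0] "%d Datei"
+//	msgstr[1] "%d Dateien"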
+func (p Message) String() string { + var buf bytes.Buffer + fmt.Fprintf(&buf, "%s", p.Comment.String()) + fmt.Fprintf(&buf, "msgid %s", encodePoString(p.MsgId)) + if p.MsgIdPlural != "" { + fmt.Fprintf(&buf, "msgid_plural %s", encodePoString(p.MsgIdPlural)) + } + if p.MsgStr != "" { + fmt.Fprintf(&buf, "msgstr %s", encodePoString(p.MsgStr)) + } + for i := 0; i < len(p.MsgStrPlural); i++ { + fmt.Fprintf(&buf, "msgstr[%d] %s", i, encodePoString(p.MsgStrPlural[i])) + } + return buf.String() +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/po/re.go b/vendor/github.com/chai2010/gettext-go/gettext/po/re.go new file mode 100644 index 0000000000..67c240a57b --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/po/re.go @@ -0,0 +1,58 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package po + +import ( + "regexp" +) + +var ( + reComment = regexp.MustCompile(`^#`) // # + reExtractedComments = regexp.MustCompile(`^#\.`) // #. + reReferenceComments = regexp.MustCompile(`^#:`) // #: + reFlagsComments = regexp.MustCompile(`^#,`) // #, fuzzy,c-format + rePrevMsgContextComments = regexp.MustCompile(`^#\|\s+msgctxt`) // #| msgctxt + rePrevMsgIdComments = regexp.MustCompile(`^#\|\s+msgid`) // #| msgid + reStringLineComments = regexp.MustCompile(`^#\|\s+".*"\s*$`) // #| "message" + + reMsgContext = regexp.MustCompile(`^msgctxt\s+".*"\s*$`) // msgctxt + reMsgId = regexp.MustCompile(`^msgid\s+".*"\s*$`) // msgid + reMsgIdPlural = regexp.MustCompile(`^msgid_plural\s+".*"\s*$`) // msgid_plural + reMsgStr = regexp.MustCompile(`^msgstr\s*".*"\s*$`) // msgstr + reMsgStrPlural = regexp.MustCompile(`^msgstr\s*(\[\d+\])\s*".*"\s*$`) // msgstr[0] + reStringLine = regexp.MustCompile(`^\s*".*"\s*$`) // "message" + reBlankLine = regexp.MustCompile(`^\s*$`) // +) + +func (p *Message) isInvalidLine(s string) bool { + if reComment.MatchString(s) { + return false + } + if reBlankLine.MatchString(s) { + return false + } + + if reMsgContext.MatchString(s) { + return false + } + if reMsgId.MatchString(s) { + return false + } + if reMsgIdPlural.MatchString(s) { + return false + } + if reMsgStr.MatchString(s) { + return false + } + if reMsgStrPlural.MatchString(s) { + return false + } + + if reStringLine.MatchString(s) { + return false + } + + return true +} diff --git a/vendor/github.com/chai2010/gettext-go/gettext/po/util.go b/vendor/github.com/chai2010/gettext-go/gettext/po/util.go new file mode 100644 index 0000000000..52544832cf --- /dev/null +++ b/vendor/github.com/chai2010/gettext-go/gettext/po/util.go @@ -0,0 +1,110 @@ +// Copyright 2013 ChaiShushan . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+package po
+
+import (
+	"bytes"
+	"strings"
+)
+
+func decodePoString(text string) string {
+	lines := strings.Split(text, "\n")
+	for i := 0; i < len(lines); i++ {
+		left := strings.Index(lines[i], `"`)
+		right := strings.LastIndex(lines[i], `"`)
+		if left < 0 || right < 0 || left == right {
+			lines[i] = ""
+			continue
+		}
+		line := lines[i][left+1 : right]
+		data := make([]byte, 0, len(line))
+		for i := 0; i < len(line); i++ {
+			if line[i] != '\\' {
+				data = append(data, line[i])
+				continue
+			}
+			if i+1 >= len(line) {
+				break
+			}
+			switch line[i+1] {
+			case 'n': // \\n -> \n
+				data = append(data, '\n')
+				i++
+			case 't': // \\t -> \t
+				data = append(data, '\t')
+				i++
+			case '\\': // \\\\ -> \\
+				data = append(data, '\\')
+				i++
+			}
+		}
+		lines[i] = string(data)
+	}
+	return strings.Join(lines, "")
+}
+
+func encodePoString(text string) string {
+	var buf bytes.Buffer
+	lines := strings.Split(text, "\n")
+	for i := 0; i < len(lines); i++ {
+		if lines[i] == "" {
+			if i != len(lines)-1 {
+				buf.WriteString(`"\n"` + "\n")
+			}
+			continue
+		}
+		buf.WriteRune('"')
+		for _, r := range lines[i] {
+			switch r {
+			case '\\':
+				buf.WriteString(`\\`)
+			case '"':
+				buf.WriteString(`\"`)
+			case '\n':
+				buf.WriteString(`\n`)
+			case '\t':
+				buf.WriteString(`\t`)
+			default:
+				buf.WriteRune(r)
+			}
+		}
+		buf.WriteString(`\n"` + "\n")
+	}
+	return buf.String()
+}
+
+func encodeCommentPoString(text string) string {
+	var buf bytes.Buffer
+	lines := strings.Split(text, "\n")
+	if len(lines) > 1 {
+		buf.WriteString(`""` + "\n")
+	}
+	for i := 0; i < len(lines); i++ {
+		if len(lines) > 0 {
+			buf.WriteString("#| ")
+		}
+		buf.WriteRune('"')
+		for _, r := range lines[i] {
+			switch r {
+			case '\\':
+				buf.WriteString(`\\`)
+			case '"':
+				buf.WriteString(`\"`)
+			case '\n':
+				buf.WriteString(`\n`)
+			case '\t':
+				buf.WriteString(`\t`)
+			default:
+				buf.WriteRune(r)
+			}
+		}
+		if i < len(lines)-1 {
+			buf.WriteString(`\n"` + "\n")
+		} else {
+			buf.WriteString(`"`)
+		}
+	}
+	return buf.String()
+}
diff --git a/vendor/github.com/chai2010/gettext-go/gettext/tr.go b/vendor/github.com/chai2010/gettext-go/gettext/tr.go
new file mode 100644
index 0000000000..fedfbc301d
--- /dev/null
+++ b/vendor/github.com/chai2010/gettext-go/gettext/tr.go
@@ -0,0 +1,128 @@
+// Copyright 2013 ChaiShushan . All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
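+
+// Note on the lookup scheme implemented below: messages are keyed by
+// "msgctxt" + mo.EotSeparator + "msgid" (see makeMapKey), and PNGettext
+// selects a plural form via PluralFormula(n), falling back to msgidPlural
+// (for n > 0) or to msgid when no translation is found.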
+ +package gettext + +import ( + "github.com/chai2010/gettext-go/gettext/mo" + "github.com/chai2010/gettext-go/gettext/plural" + "github.com/chai2010/gettext-go/gettext/po" +) + +var nilTranslator = &translator{ + MessageMap: make(map[string]mo.Message), + PluralFormula: plural.Formula("??"), +} + +type translator struct { + MessageMap map[string]mo.Message + PluralFormula func(n int) int +} + +func newMoTranslator(name string, data []byte) (*translator, error) { + var ( + f *mo.File + err error + ) + if len(data) != 0 { + f, err = mo.LoadData(data) + } else { + f, err = mo.Load(name) + } + if err != nil { + return nil, err + } + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + } + for _, v := range f.Messages { + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = v + } + if lang := f.MimeHeader.Language; lang != "" { + tr.PluralFormula = plural.Formula(lang) + } else { + tr.PluralFormula = plural.Formula("??") + } + return tr, nil +} + +func newPoTranslator(name string, data []byte) (*translator, error) { + var ( + f *po.File + err error + ) + if len(data) != 0 { + f, err = po.LoadData(data) + } else { + f, err = po.Load(name) + } + if err != nil { + return nil, err + } + var tr = &translator{ + MessageMap: make(map[string]mo.Message), + } + for _, v := range f.Messages { + tr.MessageMap[tr.makeMapKey(v.MsgContext, v.MsgId)] = mo.Message{ + MsgContext: v.MsgContext, + MsgId: v.MsgId, + MsgIdPlural: v.MsgIdPlural, + MsgStr: v.MsgStr, + MsgStrPlural: v.MsgStrPlural, + } + } + if lang := f.MimeHeader.Language; lang != "" { + tr.PluralFormula = plural.Formula(lang) + } else { + tr.PluralFormula = plural.Formula("??") + } + return tr, nil +} + +func (p *translator) PGettext(msgctxt, msgid string) string { + return p.PNGettext(msgctxt, msgid, "", 0) +} + +func (p *translator) PNGettext(msgctxt, msgid, msgidPlural string, n int) string { + n = p.PluralFormula(n) + if ss := p.findMsgStrPlural(msgctxt, msgid, msgidPlural); len(ss) != 0 { + if n >= len(ss) { + n = len(ss) - 1 + } + if ss[n] != "" { + return ss[n] + } + } + if msgidPlural != "" && n > 0 { + return msgidPlural + } + return msgid +} + +func (p *translator) findMsgStrPlural(msgctxt, msgid, msgidPlural string) []string { + key := p.makeMapKey(msgctxt, msgid) + if v, ok := p.MessageMap[key]; ok { + if len(v.MsgIdPlural) != 0 { + if len(v.MsgStrPlural) != 0 { + return v.MsgStrPlural + } else { + return nil + } + } else { + if len(v.MsgStr) != 0 { + return []string{v.MsgStr} + } else { + return nil + } + } + } + return nil +} + +func (p *translator) makeMapKey(msgctxt, msgid string) string { + if msgctxt != "" { + return msgctxt + mo.EotSeparator + msgid + } + return msgid +} diff --git a/vendor/github.com/cilium/charts/CNAME b/vendor/github.com/cilium/charts/CNAME new file mode 100644 index 0000000000..96ab6f7ed3 --- /dev/null +++ b/vendor/github.com/cilium/charts/CNAME @@ -0,0 +1 @@ +helm.cilium.io diff --git a/vendor/github.com/cilium/charts/README-dev.md b/vendor/github.com/cilium/charts/README-dev.md new file mode 100644 index 0000000000..96db49033b --- /dev/null +++ b/vendor/github.com/cilium/charts/README-dev.md @@ -0,0 +1,15 @@ +To update the development version of a helm chart, go to the branch +that you want to update in the cilium/cilium repo. 
+ +``` +$ git checkout v1.8 +``` + +Generate helm charts + +``` +$ echo "$(git branch --show-current | sed 's/v//')-dev" > VERSION && \ + make -C install/kubernetes +``` + +Run the `prepare_artifacts.sh` script from this repository. diff --git a/vendor/github.com/cilium/charts/README.md b/vendor/github.com/cilium/charts/README.md new file mode 100644 index 0000000000..a46a90df7a --- /dev/null +++ b/vendor/github.com/cilium/charts/README.md @@ -0,0 +1,88 @@ +This repository holds helm templates for the following releases: + +* [v1.11.1](https://github.com/cilium/cilium/releases/tag/v1.11.1) (_[source](https://github.com/cilium/cilium/tree/v1.11.1/install/kubernetes/cilium)_) +* [v1.11.0](https://github.com/cilium/cilium/releases/tag/v1.11.0) (_[source](https://github.com/cilium/cilium/tree/v1.11.0/install/kubernetes/cilium)_) +* [v1.11.0-rc3](https://github.com/cilium/cilium/releases/tag/v1.11.0-rc3) (_[source](https://github.com/cilium/cilium/tree/v1.11.0-rc3/install/kubernetes/cilium)_) +* [v1.11.0-rc2](https://github.com/cilium/cilium/releases/tag/v1.11.0-rc2) (_[source](https://github.com/cilium/cilium/tree/v1.11.0-rc2/install/kubernetes/cilium)_) +* [v1.11.0-rc1](https://github.com/cilium/cilium/releases/tag/v1.11.0-rc1) (_[source](https://github.com/cilium/cilium/tree/v1.11.0-rc1/install/kubernetes/cilium)_) +* [v1.11.0-rc0](https://github.com/cilium/cilium/releases/tag/v1.11.0-rc0) (_[source](https://github.com/cilium/cilium/tree/v1.11.0-rc0/install/kubernetes/cilium)_) +* [v1.10.7](https://github.com/cilium/cilium/releases/tag/v1.10.7) (_[source](https://github.com/cilium/cilium/tree/v1.10.7/install/kubernetes/cilium)_) +* [v1.10.6](https://github.com/cilium/cilium/releases/tag/v1.10.6) (_[source](https://github.com/cilium/cilium/tree/v1.10.6/install/kubernetes/cilium)_) +* [v1.10.5](https://github.com/cilium/cilium/releases/tag/v1.10.5) (_[source](https://github.com/cilium/cilium/tree/v1.10.5/install/kubernetes/cilium)_) +* [v1.10.4](https://github.com/cilium/cilium/releases/tag/v1.10.4) (_[source](https://github.com/cilium/cilium/tree/v1.10.4/install/kubernetes/cilium)_) +* [v1.10.3](https://github.com/cilium/cilium/releases/tag/v1.10.3) (_[source](https://github.com/cilium/cilium/tree/v1.10.3/install/kubernetes/cilium)_) +* [v1.10.2](https://github.com/cilium/cilium/releases/tag/v1.10.2) (_[source](https://github.com/cilium/cilium/tree/v1.10.2/install/kubernetes/cilium)_) +* [v1.10.1](https://github.com/cilium/cilium/releases/tag/v1.10.1) (_[source](https://github.com/cilium/cilium/tree/v1.10.1/install/kubernetes/cilium)_) +* [v1.10.0](https://github.com/cilium/cilium/releases/tag/v1.10.0) (_[source](https://github.com/cilium/cilium/tree/v1.10.0/install/kubernetes/cilium)_) +* [v1.10.0-rc2](https://github.com/cilium/cilium/releases/tag/v1.10.0-rc2) (_[source](https://github.com/cilium/cilium/tree/v1.10.0-rc2/install/kubernetes/cilium)_) +* [v1.10.0-rc1](https://github.com/cilium/cilium/releases/tag/v1.10.0-rc1) (_[source](https://github.com/cilium/cilium/tree/v1.10.0-rc1/install/kubernetes/cilium)_) +* [v1.10.0-rc0](https://github.com/cilium/cilium/releases/tag/v1.10.0-rc0) (_[source](https://github.com/cilium/cilium/tree/v1.10.0-rc0/install/kubernetes/cilium)_) +* 
[v1.9.12](https://github.com/cilium/cilium/releases/tag/v1.9.12) (_[source](https://github.com/cilium/cilium/tree/v1.9.12/install/kubernetes/cilium)_) +* [v1.9.11](https://github.com/cilium/cilium/releases/tag/v1.9.11) (_[source](https://github.com/cilium/cilium/tree/v1.9.11/install/kubernetes/cilium)_) +* [v1.9.10](https://github.com/cilium/cilium/releases/tag/v1.9.10) (_[source](https://github.com/cilium/cilium/tree/v1.9.10/install/kubernetes/cilium)_) +* [v1.9.9](https://github.com/cilium/cilium/releases/tag/v1.9.9) (_[source](https://github.com/cilium/cilium/tree/v1.9.9/install/kubernetes/cilium)_) +* [v1.9.8](https://github.com/cilium/cilium/releases/tag/v1.9.8) (_[source](https://github.com/cilium/cilium/tree/v1.9.8/install/kubernetes/cilium)_) +* [v1.9.7](https://github.com/cilium/cilium/releases/tag/v1.9.7) (_[source](https://github.com/cilium/cilium/tree/v1.9.7/install/kubernetes/cilium)_) +* [v1.9.6](https://github.com/cilium/cilium/releases/tag/v1.9.6) (_[source](https://github.com/cilium/cilium/tree/v1.9.6/install/kubernetes/cilium)_) +* [v1.9.5](https://github.com/cilium/cilium/releases/tag/v1.9.5) (_[source](https://github.com/cilium/cilium/tree/v1.9.5/install/kubernetes/cilium)_) +* [v1.9.4](https://github.com/cilium/cilium/releases/tag/v1.9.4) (_[source](https://github.com/cilium/cilium/tree/v1.9.4/install/kubernetes/cilium)_) +* [v1.9.3](https://github.com/cilium/cilium/releases/tag/v1.9.3) (_[source](https://github.com/cilium/cilium/tree/v1.9.3/install/kubernetes/cilium)_) +* [v1.9.2](https://github.com/cilium/cilium/releases/tag/v1.9.2) (_[source](https://github.com/cilium/cilium/tree/v1.9.2/install/kubernetes/cilium)_) +* [v1.9.1](https://github.com/cilium/cilium/releases/tag/v1.9.1) (_[source](https://github.com/cilium/cilium/tree/v1.9.1/install/kubernetes/cilium)_) +* [v1.9.0](https://github.com/cilium/cilium/releases/tag/v1.9.0) (_[source](https://github.com/cilium/cilium/tree/v1.9.0/install/kubernetes/cilium)_) +* [v1.9.0-rc3](https://github.com/cilium/cilium/releases/tag/v1.9.0-rc3) (_[source](https://github.com/cilium/cilium/tree/v1.9.0-rc3/install/kubernetes/cilium)_) +* [v1.9.0-rc2](https://github.com/cilium/cilium/releases/tag/v1.9.0-rc2) (_[source](https://github.com/cilium/cilium/tree/v1.9.0-rc2/install/kubernetes/cilium)_) +* [v1.9.0-rc1](https://github.com/cilium/cilium/releases/tag/v1.9.0-rc1) (_[source](https://github.com/cilium/cilium/tree/v1.9.0-rc1/install/kubernetes/cilium)_) +* [v1.9.0-rc0](https://github.com/cilium/cilium/releases/tag/v1.9.0-rc0) (_[source](https://github.com/cilium/cilium/tree/v1.9.0-rc0/install/kubernetes/cilium)_) +* [v1.8.13](https://github.com/cilium/cilium/releases/tag/v1.8.13) (_[source](https://github.com/cilium/cilium/tree/v1.8.13/install/kubernetes/cilium)_) +* [v1.8.12](https://github.com/cilium/cilium/releases/tag/v1.8.12) (_[source](https://github.com/cilium/cilium/tree/v1.8.12/install/kubernetes/cilium)_) +* [v1.8.11](https://github.com/cilium/cilium/releases/tag/v1.8.11) (_[source](https://github.com/cilium/cilium/tree/v1.8.11/install/kubernetes/cilium)_) +* [v1.8.10](https://github.com/cilium/cilium/releases/tag/v1.8.10) 
(_[source](https://github.com/cilium/cilium/tree/v1.8.10/install/kubernetes/cilium)_) +* [v1.8.9](https://github.com/cilium/cilium/releases/tag/v1.8.9) (_[source](https://github.com/cilium/cilium/tree/v1.8.9/install/kubernetes/cilium)_) +* [v1.8.8](https://github.com/cilium/cilium/releases/tag/v1.8.8) (_[source](https://github.com/cilium/cilium/tree/v1.8.8/install/kubernetes/cilium)_) +* [v1.8.7](https://github.com/cilium/cilium/releases/tag/v1.8.7) (_[source](https://github.com/cilium/cilium/tree/v1.8.7/install/kubernetes/cilium)_) +* [v1.8.6](https://github.com/cilium/cilium/releases/tag/v1.8.6) (_[source](https://github.com/cilium/cilium/tree/v1.8.6/install/kubernetes/cilium)_) +* [v1.8.5](https://github.com/cilium/cilium/releases/tag/v1.8.5) (_[source](https://github.com/cilium/cilium/tree/v1.8.5/install/kubernetes/cilium)_) +* [v1.8.4](https://github.com/cilium/cilium/releases/tag/v1.8.4) (_[source](https://github.com/cilium/cilium/tree/v1.8.4/install/kubernetes/cilium)_) +* [v1.8.3](https://github.com/cilium/cilium/releases/tag/v1.8.3) (_[source](https://github.com/cilium/cilium/tree/v1.8.3/install/kubernetes/cilium)_) +* [v1.8.2](https://github.com/cilium/cilium/releases/tag/v1.8.2) (_[source](https://github.com/cilium/cilium/tree/v1.8.2/install/kubernetes/cilium)_) +* [v1.8.1](https://github.com/cilium/cilium/releases/tag/v1.8.1) (_[source](https://github.com/cilium/cilium/tree/v1.8.1/install/kubernetes/cilium)_) +* [v1.8.0](https://github.com/cilium/cilium/releases/tag/v1.8.0) (_[source](https://github.com/cilium/cilium/tree/v1.8.0/install/kubernetes/cilium)_) +* [v1.8.0-rc4](https://github.com/cilium/cilium/releases/tag/v1.8.0-rc4) (_[source](https://github.com/cilium/cilium/tree/v1.8.0-rc4/install/kubernetes/cilium)_) +* [v1.8.0-rc3](https://github.com/cilium/cilium/releases/tag/v1.8.0-rc3) (_[source](https://github.com/cilium/cilium/tree/v1.8.0-rc3/install/kubernetes/cilium)_) +* [v1.8.0-rc2](https://github.com/cilium/cilium/releases/tag/v1.8.0-rc2) (_[source](https://github.com/cilium/cilium/tree/v1.8.0-rc2/install/kubernetes/cilium)_) +* [v1.8.0-rc1](https://github.com/cilium/cilium/releases/tag/v1.8.0-rc1) (_[source](https://github.com/cilium/cilium/tree/v1.8.0-rc1/install/kubernetes/cilium)_) +* [v1.7.16](https://github.com/cilium/cilium/releases/tag/v1.7.16) (_[source](https://github.com/cilium/cilium/tree/v1.7.16/install/kubernetes/cilium)_) +* [v1.7.15](https://github.com/cilium/cilium/releases/tag/v1.7.15) (_[source](https://github.com/cilium/cilium/tree/v1.7.15/install/kubernetes/cilium)_) +* [v1.7.14](https://github.com/cilium/cilium/releases/tag/v1.7.14) (_[source](https://github.com/cilium/cilium/tree/v1.7.14/install/kubernetes/cilium)_) +* [v1.7.13](https://github.com/cilium/cilium/releases/tag/v1.7.13) (_[source](https://github.com/cilium/cilium/tree/v1.7.13/install/kubernetes/cilium)_) +* [v1.7.12](https://github.com/cilium/cilium/releases/tag/v1.7.12) (_[source](https://github.com/cilium/cilium/tree/v1.7.12/install/kubernetes/cilium)_) +* [v1.7.11](https://github.com/cilium/cilium/releases/tag/v1.7.11) (_[source](https://github.com/cilium/cilium/tree/v1.7.11/install/kubernetes/cilium)_) +* 
[v1.7.10](https://github.com/cilium/cilium/releases/tag/v1.7.10) (_[source](https://github.com/cilium/cilium/tree/v1.7.10/install/kubernetes/cilium)_) +* [v1.7.9](https://github.com/cilium/cilium/releases/tag/v1.7.9) (_[source](https://github.com/cilium/cilium/tree/v1.7.9/install/kubernetes/cilium)_) +* [v1.7.8](https://github.com/cilium/cilium/releases/tag/v1.7.8) (_[source](https://github.com/cilium/cilium/tree/v1.7.8/install/kubernetes/cilium)_) +* [v1.7.7](https://github.com/cilium/cilium/releases/tag/v1.7.7) (_[source](https://github.com/cilium/cilium/tree/v1.7.7/install/kubernetes/cilium)_) +* [v1.7.6](https://github.com/cilium/cilium/releases/tag/v1.7.6) (_[source](https://github.com/cilium/cilium/tree/v1.7.6/install/kubernetes/cilium)_) +* [v1.7.5](https://github.com/cilium/cilium/releases/tag/v1.7.5) (_[source](https://github.com/cilium/cilium/tree/v1.7.5/install/kubernetes/cilium)_) +* [v1.7.4](https://github.com/cilium/cilium/releases/tag/v1.7.4) (_[source](https://github.com/cilium/cilium/tree/v1.7.4/install/kubernetes/cilium)_) +* [v1.7.3](https://github.com/cilium/cilium/releases/tag/v1.7.3) (_[source](https://github.com/cilium/cilium/tree/v1.7.3/install/kubernetes/cilium)_) +* [v1.7.2](https://github.com/cilium/cilium/releases/tag/v1.7.2) (_[source](https://github.com/cilium/cilium/tree/v1.7.2/install/kubernetes/cilium)_) +* [v1.7.1](https://github.com/cilium/cilium/releases/tag/v1.7.1) (_[source](https://github.com/cilium/cilium/tree/v1.7.1/install/kubernetes/cilium)_) +* [v1.7.0](https://github.com/cilium/cilium/releases/tag/v1.7.0) (_[source](https://github.com/cilium/cilium/tree/v1.7.0/install/kubernetes/cilium)_) +* [v1.7.0-rc4](https://github.com/cilium/cilium/releases/tag/v1.7.0-rc4) (_[source](https://github.com/cilium/cilium/tree/v1.7.0-rc4/install/kubernetes/cilium)_) +* [v1.7.0-rc3](https://github.com/cilium/cilium/releases/tag/v1.7.0-rc3) (_[source](https://github.com/cilium/cilium/tree/v1.7.0-rc3/install/kubernetes/cilium)_) +* [v1.6.12](https://github.com/cilium/cilium/releases/tag/v1.6.12) (_[source](https://github.com/cilium/cilium/tree/v1.6.12/install/kubernetes/cilium)_) +* [v1.6.11](https://github.com/cilium/cilium/releases/tag/v1.6.11) (_[source](https://github.com/cilium/cilium/tree/v1.6.11/install/kubernetes/cilium)_) +* [v1.6.10](https://github.com/cilium/cilium/releases/tag/v1.6.10) (_[source](https://github.com/cilium/cilium/tree/v1.6.10/install/kubernetes/cilium)_) +* [v1.6.9](https://github.com/cilium/cilium/releases/tag/v1.6.9) (_[source](https://github.com/cilium/cilium/tree/v1.6.9/install/kubernetes/cilium)_) +* [v1.6.8](https://github.com/cilium/cilium/releases/tag/v1.6.8) (_[source](https://github.com/cilium/cilium/tree/v1.6.8/install/kubernetes/cilium)_) +* [v1.6.7](https://github.com/cilium/cilium/releases/tag/v1.6.7) (_[source](https://github.com/cilium/cilium/tree/v1.6.7/install/kubernetes/cilium)_) +* [v1.6.6](https://github.com/cilium/cilium/releases/tag/v1.6.6) (_[source](https://github.com/cilium/cilium/tree/v1.6.6/install/kubernetes/cilium)_) +* [v1.6.5](https://github.com/cilium/cilium/releases/tag/v1.6.5) (_[source](https://github.com/cilium/cilium/tree/v1.6.5/install/kubernetes/cilium)_) + +The 
following development releases pull the latest backports for the corresponding branch: + +* [v1.9-dev](https://github.com/cilium/cilium/tree/v1.9) (_[source](https://github.com/cilium/cilium/tree/v1.9/install/kubernetes/cilium)_) +* [v1.8-dev](https://github.com/cilium/cilium/tree/v1.8) (_[source](https://github.com/cilium/cilium/tree/v1.8/install/kubernetes/cilium)_) +* [v1.7-dev](https://github.com/cilium/cilium/tree/v1.7) (_[source](https://github.com/cilium/cilium/tree/v1.7/install/kubernetes/cilium)_) +* [v1.6-dev](https://github.com/cilium/cilium/tree/v1.6) (_[source](https://github.com/cilium/cilium/tree/v1.6/install/kubernetes/cilium)_) diff --git a/vendor/github.com/cilium/charts/artifacthub-repo.yml b/vendor/github.com/cilium/charts/artifacthub-repo.yml new file mode 100644 index 0000000000..136793f41c --- /dev/null +++ b/vendor/github.com/cilium/charts/artifacthub-repo.yml @@ -0,0 +1,6 @@ +repositoryID: 8947a0c2-77af-49c8-9e83-cf8c7d17929f +owners: + - name: Joe Stringer + email: joe@cilium.io + - name: André Martins + email: andre@cilium.io diff --git a/vendor/github.com/cilium/charts/cilium-1.10.0-rc0.tgz b/vendor/github.com/cilium/charts/cilium-1.10.0-rc0.tgz new file mode 100644 index 0000000000..3564d8bc1c Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.0-rc0.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.0-rc1.tgz b/vendor/github.com/cilium/charts/cilium-1.10.0-rc1.tgz new file mode 100644 index 0000000000..aa7492898f Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.0-rc1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.0-rc2.tgz b/vendor/github.com/cilium/charts/cilium-1.10.0-rc2.tgz new file mode 100644 index 0000000000..d527543ed3 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.0-rc2.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.0.tgz b/vendor/github.com/cilium/charts/cilium-1.10.0.tgz new file mode 100644 index 0000000000..3a0acb1029 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.0.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.1.tgz b/vendor/github.com/cilium/charts/cilium-1.10.1.tgz new file mode 100644 index 0000000000..92e091966d Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.2.tgz b/vendor/github.com/cilium/charts/cilium-1.10.2.tgz new file mode 100644 index 0000000000..d0d7fba914 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.2.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.3.tgz b/vendor/github.com/cilium/charts/cilium-1.10.3.tgz new file mode 100644 index 0000000000..6968b8a1b4 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.4.tgz b/vendor/github.com/cilium/charts/cilium-1.10.4.tgz new file mode 100644 index 0000000000..837188a7e2 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.4.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.5.tgz b/vendor/github.com/cilium/charts/cilium-1.10.5.tgz new file mode 100644 index 
0000000000..5b3494ee55 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.5.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.6.tgz b/vendor/github.com/cilium/charts/cilium-1.10.6.tgz new file mode 100644 index 0000000000..9ca09fb23d Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.6.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.10.7.tgz b/vendor/github.com/cilium/charts/cilium-1.10.7.tgz new file mode 100644 index 0000000000..e7ec871681 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.10.7.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.11.0-rc0.tgz b/vendor/github.com/cilium/charts/cilium-1.11.0-rc0.tgz new file mode 100644 index 0000000000..34a6e5fa1a Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.11.0-rc0.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.11.0-rc1.tgz b/vendor/github.com/cilium/charts/cilium-1.11.0-rc1.tgz new file mode 100644 index 0000000000..3d6c82ce8b Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.11.0-rc1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.11.0-rc2.tgz b/vendor/github.com/cilium/charts/cilium-1.11.0-rc2.tgz new file mode 100644 index 0000000000..3047603885 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.11.0-rc2.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.11.0-rc3.tgz b/vendor/github.com/cilium/charts/cilium-1.11.0-rc3.tgz new file mode 100644 index 0000000000..5bc1a8a15c Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.11.0-rc3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.11.0.tgz b/vendor/github.com/cilium/charts/cilium-1.11.0.tgz new file mode 100644 index 0000000000..baecf73afe Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.11.0.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.11.1.tgz b/vendor/github.com/cilium/charts/cilium-1.11.1.tgz new file mode 100644 index 0000000000..3a787ed5d0 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.11.1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6-dev.tgz b/vendor/github.com/cilium/charts/cilium-1.6-dev.tgz new file mode 100644 index 0000000000..520036ba29 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6-dev.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6.10.tgz b/vendor/github.com/cilium/charts/cilium-1.6.10.tgz new file mode 100644 index 0000000000..1e082294ed Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6.10.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6.11.tgz b/vendor/github.com/cilium/charts/cilium-1.6.11.tgz new file mode 100644 index 0000000000..305d751c71 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6.11.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6.12.tgz b/vendor/github.com/cilium/charts/cilium-1.6.12.tgz new file mode 100644 index 0000000000..b70c117ef7 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6.12.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6.5.tgz 
b/vendor/github.com/cilium/charts/cilium-1.6.5.tgz new file mode 100644 index 0000000000..4f519dea78 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6.5.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6.6.tgz b/vendor/github.com/cilium/charts/cilium-1.6.6.tgz new file mode 100644 index 0000000000..75b27ee890 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6.6.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6.7.tgz b/vendor/github.com/cilium/charts/cilium-1.6.7.tgz new file mode 100644 index 0000000000..efbe7fe8a0 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6.7.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6.8.tgz b/vendor/github.com/cilium/charts/cilium-1.6.8.tgz new file mode 100644 index 0000000000..361b28d6cb Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6.8.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.6.9.tgz b/vendor/github.com/cilium/charts/cilium-1.6.9.tgz new file mode 100644 index 0000000000..56dd05da17 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.6.9.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7-dev.tgz b/vendor/github.com/cilium/charts/cilium-1.7-dev.tgz new file mode 100644 index 0000000000..94858df6de Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7-dev.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.0-rc3.tgz b/vendor/github.com/cilium/charts/cilium-1.7.0-rc3.tgz new file mode 100644 index 0000000000..58fc4b136e Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.0-rc3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.0-rc4.tgz b/vendor/github.com/cilium/charts/cilium-1.7.0-rc4.tgz new file mode 100644 index 0000000000..c3beab70ef Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.0-rc4.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.0.tgz b/vendor/github.com/cilium/charts/cilium-1.7.0.tgz new file mode 100644 index 0000000000..8ccbcbe72a Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.0.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.1.tgz b/vendor/github.com/cilium/charts/cilium-1.7.1.tgz new file mode 100644 index 0000000000..54eef42041 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.10.tgz b/vendor/github.com/cilium/charts/cilium-1.7.10.tgz new file mode 100644 index 0000000000..5a4dc1dec8 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.10.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.11.tgz b/vendor/github.com/cilium/charts/cilium-1.7.11.tgz new file mode 100644 index 0000000000..68e254d321 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.11.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.12.tgz b/vendor/github.com/cilium/charts/cilium-1.7.12.tgz new file mode 100644 index 0000000000..b0ac80807d Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.12.tgz differ diff --git 
a/vendor/github.com/cilium/charts/cilium-1.7.13.tgz b/vendor/github.com/cilium/charts/cilium-1.7.13.tgz new file mode 100644 index 0000000000..e832d988ac Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.13.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.14.tgz b/vendor/github.com/cilium/charts/cilium-1.7.14.tgz new file mode 100644 index 0000000000..ac0ec51100 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.14.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.15.tgz b/vendor/github.com/cilium/charts/cilium-1.7.15.tgz new file mode 100644 index 0000000000..7ca811081e Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.15.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.16.tgz b/vendor/github.com/cilium/charts/cilium-1.7.16.tgz new file mode 100644 index 0000000000..07e9e3e2c0 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.16.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.2.tgz b/vendor/github.com/cilium/charts/cilium-1.7.2.tgz new file mode 100644 index 0000000000..44b9bd74a9 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.2.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.3.tgz b/vendor/github.com/cilium/charts/cilium-1.7.3.tgz new file mode 100644 index 0000000000..0fee9478ca Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.4.tgz b/vendor/github.com/cilium/charts/cilium-1.7.4.tgz new file mode 100644 index 0000000000..ef76bd4ed8 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.4.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.5.tgz b/vendor/github.com/cilium/charts/cilium-1.7.5.tgz new file mode 100644 index 0000000000..91b431b5df Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.5.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.6.tgz b/vendor/github.com/cilium/charts/cilium-1.7.6.tgz new file mode 100644 index 0000000000..a0ad0ea710 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.6.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.7.tgz b/vendor/github.com/cilium/charts/cilium-1.7.7.tgz new file mode 100644 index 0000000000..f062eb7faf Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.7.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.8.tgz b/vendor/github.com/cilium/charts/cilium-1.7.8.tgz new file mode 100644 index 0000000000..51156f66fa Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.8.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.7.9.tgz b/vendor/github.com/cilium/charts/cilium-1.7.9.tgz new file mode 100644 index 0000000000..4f2290b63e Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.7.9.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8-dev.tgz b/vendor/github.com/cilium/charts/cilium-1.8-dev.tgz new file mode 100644 index 0000000000..a2fe737473 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8-dev.tgz differ diff --git 
a/vendor/github.com/cilium/charts/cilium-1.8.0-rc1.tgz b/vendor/github.com/cilium/charts/cilium-1.8.0-rc1.tgz new file mode 100644 index 0000000000..33be851011 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.0-rc1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.0-rc2.tgz b/vendor/github.com/cilium/charts/cilium-1.8.0-rc2.tgz new file mode 100644 index 0000000000..d1376ebe5c Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.0-rc2.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.0-rc3.tgz b/vendor/github.com/cilium/charts/cilium-1.8.0-rc3.tgz new file mode 100644 index 0000000000..6e9c86c243 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.0-rc3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.0-rc4.tgz b/vendor/github.com/cilium/charts/cilium-1.8.0-rc4.tgz new file mode 100644 index 0000000000..7998181da2 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.0-rc4.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.0.tgz b/vendor/github.com/cilium/charts/cilium-1.8.0.tgz new file mode 100644 index 0000000000..5602765853 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.0.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.1.tgz b/vendor/github.com/cilium/charts/cilium-1.8.1.tgz new file mode 100644 index 0000000000..184c9bca12 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.10.tgz b/vendor/github.com/cilium/charts/cilium-1.8.10.tgz new file mode 100644 index 0000000000..797f22d5ea Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.10.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.11.tgz b/vendor/github.com/cilium/charts/cilium-1.8.11.tgz new file mode 100644 index 0000000000..46e514af2c Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.11.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.12.tgz b/vendor/github.com/cilium/charts/cilium-1.8.12.tgz new file mode 100644 index 0000000000..4cbfe97070 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.12.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.13.tgz b/vendor/github.com/cilium/charts/cilium-1.8.13.tgz new file mode 100644 index 0000000000..1807ce113f Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.13.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.2.tgz b/vendor/github.com/cilium/charts/cilium-1.8.2.tgz new file mode 100644 index 0000000000..a8d6f5b800 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.2.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.3.tgz b/vendor/github.com/cilium/charts/cilium-1.8.3.tgz new file mode 100644 index 0000000000..33676503b8 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.4.tgz b/vendor/github.com/cilium/charts/cilium-1.8.4.tgz new file mode 100644 index 0000000000..7908e5d496 Binary files /dev/null and 
b/vendor/github.com/cilium/charts/cilium-1.8.4.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.5.tgz b/vendor/github.com/cilium/charts/cilium-1.8.5.tgz new file mode 100644 index 0000000000..3cb22d4e9e Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.5.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.6.tgz b/vendor/github.com/cilium/charts/cilium-1.8.6.tgz new file mode 100644 index 0000000000..a31b96d30e Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.6.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.7.tgz b/vendor/github.com/cilium/charts/cilium-1.8.7.tgz new file mode 100644 index 0000000000..2e6271186b Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.7.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.8.tgz b/vendor/github.com/cilium/charts/cilium-1.8.8.tgz new file mode 100644 index 0000000000..b94236163e Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.8.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.8.9.tgz b/vendor/github.com/cilium/charts/cilium-1.8.9.tgz new file mode 100644 index 0000000000..ba50143f5f Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.8.9.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9-dev.tgz b/vendor/github.com/cilium/charts/cilium-1.9-dev.tgz new file mode 100644 index 0000000000..82d13b895f Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9-dev.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.0-rc0.tgz b/vendor/github.com/cilium/charts/cilium-1.9.0-rc0.tgz new file mode 100644 index 0000000000..dfc2f5147a Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.0-rc0.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.0-rc1.tgz b/vendor/github.com/cilium/charts/cilium-1.9.0-rc1.tgz new file mode 100644 index 0000000000..087d9c4cd9 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.0-rc1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.0-rc2.tgz b/vendor/github.com/cilium/charts/cilium-1.9.0-rc2.tgz new file mode 100644 index 0000000000..db7d48df93 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.0-rc2.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.0-rc3.tgz b/vendor/github.com/cilium/charts/cilium-1.9.0-rc3.tgz new file mode 100644 index 0000000000..691f30d02b Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.0-rc3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.0.tgz b/vendor/github.com/cilium/charts/cilium-1.9.0.tgz new file mode 100644 index 0000000000..4ad51b3fbf Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.0.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.1.tgz b/vendor/github.com/cilium/charts/cilium-1.9.1.tgz new file mode 100644 index 0000000000..dfc31f5569 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.1.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.10.tgz b/vendor/github.com/cilium/charts/cilium-1.9.10.tgz new file mode 100644 index 
0000000000..0b336765f2 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.10.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.11.tgz b/vendor/github.com/cilium/charts/cilium-1.9.11.tgz new file mode 100644 index 0000000000..49f969d227 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.11.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.12.tgz b/vendor/github.com/cilium/charts/cilium-1.9.12.tgz new file mode 100644 index 0000000000..1b6844c080 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.12.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.2.tgz b/vendor/github.com/cilium/charts/cilium-1.9.2.tgz new file mode 100644 index 0000000000..8cf61dcd3b Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.2.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.3.tgz b/vendor/github.com/cilium/charts/cilium-1.9.3.tgz new file mode 100644 index 0000000000..ed46f67f9e Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.3.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.4.tgz b/vendor/github.com/cilium/charts/cilium-1.9.4.tgz new file mode 100644 index 0000000000..2fbfe2bb9e Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.4.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.5.tgz b/vendor/github.com/cilium/charts/cilium-1.9.5.tgz new file mode 100644 index 0000000000..f2ff1be6a0 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.5.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.6.tgz b/vendor/github.com/cilium/charts/cilium-1.9.6.tgz new file mode 100644 index 0000000000..bd9c86a4e4 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.6.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.7.tgz b/vendor/github.com/cilium/charts/cilium-1.9.7.tgz new file mode 100644 index 0000000000..0c58c8f951 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.7.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.8.tgz b/vendor/github.com/cilium/charts/cilium-1.9.8.tgz new file mode 100644 index 0000000000..7e3350a664 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.8.tgz differ diff --git a/vendor/github.com/cilium/charts/cilium-1.9.9.tgz b/vendor/github.com/cilium/charts/cilium-1.9.9.tgz new file mode 100644 index 0000000000..b9d584bdc2 Binary files /dev/null and b/vendor/github.com/cilium/charts/cilium-1.9.9.tgz differ diff --git a/vendor/github.com/cilium/charts/fix_dates.sh b/vendor/github.com/cilium/charts/fix_dates.sh new file mode 100644 index 0000000000..312c65f5f5 --- /dev/null +++ b/vendor/github.com/cilium/charts/fix_dates.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -eo pipefail + +linesWithDate=($(git blame index.yaml --date=iso-strict \ + | awk '/created/ { print $5; }' \ + | sed 's/)$//')) + +# Lines with the date were overwritten. Take an adjacent line for real date. +dates=($(git blame index.yaml --date=iso-strict \ + | grep -A 1 created \ + | grep -v created \ + | awk '{ print $4 }')) + +for i in ${!linesWithDate[@]}; do + if ! 
date --date "${dates[$i]}" >/dev/null; then
+    >&2 echo "unclean git tree, stash all changes before running this script."
+    exit 1
+  fi
+
+  sed -i "${linesWithDate[$i]}s/\"[^\"]*\"/\"${dates[$i]}\"/" index.yaml
+done
diff --git a/vendor/github.com/cilium/charts/helm.go b/vendor/github.com/cilium/charts/helm.go
new file mode 100644
index 0000000000..dea70a098c
--- /dev/null
+++ b/vendor/github.com/cilium/charts/helm.go
@@ -0,0 +1,13 @@
+// SPDX-License-Identifier: Apache-2.0
+// Copyright Authors of Cilium
+
+package helm
+
+import (
+	"embed"
+)
+
+var (
+	//go:embed *.tgz
+	HelmFS embed.FS
+)
diff --git a/vendor/github.com/cilium/charts/index.yaml b/vendor/github.com/cilium/charts/index.yaml
new file mode 100644
index 0000000000..18303d14da
--- /dev/null
+++ b/vendor/github.com/cilium/charts/index.yaml
@@ -0,0 +1,4421 @@
+apiVersion: v1
+entries:
+  cilium:
+  - annotations:
+      artifacthub.io/crds: |
+        - kind: CiliumNetworkPolicy
+          version: v2
+          name: ciliumnetworkpolicies.cilium.io
+          displayName: Cilium Network Policy
+          description: |
+            Cilium Network Policies provide additional functionality beyond what
+            is provided by standard Kubernetes NetworkPolicy such as the ability
+            to allow traffic based on FQDNs, or to filter at Layer 7.
+        - kind: CiliumClusterwideNetworkPolicy
+          version: v2
+          name: ciliumclusterwidenetworkpolicies.cilium.io
+          displayName: Cilium Clusterwide Network Policy
+          description: |
+            Cilium Clusterwide Network Policies support configuring network traffic
+            policies across the entire cluster, including applying node firewalls.
+        - kind: CiliumExternalWorkload
+          version: v2
+          name: ciliumexternalworkloads.cilium.io
+          displayName: Cilium External Workload
+          description: |
+            Cilium External Workload supports configuring the ability for external
+            non-Kubernetes workloads to join the cluster.
+        - kind: CiliumLocalRedirectPolicy
+          version: v2
+          name: ciliumlocalredirectpolicies.cilium.io
+          displayName: Cilium Local Redirect Policy
+          description: |
+            Cilium Local Redirect Policy allows local redirects to be configured
+            within a node to support use cases like Node-Local DNS or KIAM.
+        - kind: CiliumNode
+          version: v2
+          name: ciliumnodes.cilium.io
+          displayName: Cilium Node
+          description: |
+            Cilium Node represents a node managed by Cilium. It contains a
+            specification to control various node specific configuration aspects
+            and a status section to represent the status of the node.
+        - kind: CiliumIdentity
+          version: v2
+          name: ciliumidentities.cilium.io
+          displayName: Cilium Identity
+          description: |
+            Cilium Identity allows introspection into security identities that
+            Cilium allocates which identify sets of labels that are assigned to
+            individual endpoints in the cluster.
+        - kind: CiliumEndpoint
+          version: v2
+          name: ciliumendpoints.cilium.io
+          displayName: Cilium Endpoint
+          description: |
+            Cilium Endpoint represents the status of individual pods or nodes in
+            the cluster which are managed by Cilium, including enforcement status,
+            IP addressing and whether the networking is successfully operational.
+        - kind: CiliumEndpointSlice
+          version: v2alpha1
+          name: ciliumendpointslices.cilium.io
+          displayName: Cilium Endpoint Slice
+          description: |
+            Cilium Endpoint Slice represents the status of groups of pods or nodes
+            in the cluster which are managed by Cilium, including enforcement status,
+            IP addressing and whether the networking is successfully operational.
+        - kind: CiliumEgressNATPolicy
+          version: v2alpha1
+          name: ciliumegressnatpolicies.cilium.io
+          displayName: Cilium Egress NAT Policy
+          description: |
+            Cilium Egress NAT Policy provides control over the way that traffic
+            leaves the cluster and which source addresses to use for that traffic.
+    apiVersion: v2
+    appVersion: 1.11.1
+    created: "2022-01-18T18:00:31-08:00"
+    description: eBPF-based Networking, Security, and Observability
+    digest: 9298ca9495fdc578285d4f2a28ea6e473f4eec22ab56d4e066ef234478ef196a
+    home: https://cilium.io/
+    icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.11.1/Documentation/images/logo-solo.svg
+    keywords:
+    - BPF
+    - eBPF
+    - Kubernetes
+    - Networking
+    - Security
+    - Observability
+    - Troubleshooting
+    kubeVersion: '>= 1.16.0-0'
+    name: cilium
+    sources:
+    - https://github.com/cilium/cilium
+    urls:
+    - cilium-1.11.1.tgz
+    version: 1.11.1
+  - annotations:
+      artifacthub.io/crds: |
+        - kind: CiliumNetworkPolicy
+          version: v2
+          name: ciliumnetworkpolicies.cilium.io
+          displayName: Cilium Network Policy
+          description: |
+            Cilium Network Policies provide additional functionality beyond what
+            is provided by standard Kubernetes NetworkPolicy such as the ability
+            to allow traffic based on FQDNs, or to filter at Layer 7.
+        - kind: CiliumClusterwideNetworkPolicy
+          version: v2
+          name: ciliumclusterwidenetworkpolicies.cilium.io
+          displayName: Cilium Clusterwide Network Policy
+          description: |
+            Cilium Clusterwide Network Policies support configuring network traffic
+            policies across the entire cluster, including applying node firewalls.
+        - kind: CiliumExternalWorkload
+          version: v2
+          name: ciliumexternalworkloads.cilium.io
+          displayName: Cilium External Workload
+          description: |
+            Cilium External Workload supports configuring the ability for external
+            non-Kubernetes workloads to join the cluster.
+        - kind: CiliumLocalRedirectPolicy
+          version: v2
+          name: ciliumlocalredirectpolicies.cilium.io
+          displayName: Cilium Local Redirect Policy
+          description: |
+            Cilium Local Redirect Policy allows local redirects to be configured
+            within a node to support use cases like Node-Local DNS or KIAM.
+        - kind: CiliumNode
+          version: v2
+          name: ciliumnodes.cilium.io
+          displayName: Cilium Node
+          description: |
+            Cilium Node represents a node managed by Cilium. It contains a
+            specification to control various node specific configuration aspects
+            and a status section to represent the status of the node.
+        - kind: CiliumIdentity
+          version: v2
+          name: ciliumidentities.cilium.io
+          displayName: Cilium Identity
+          description: |
+            Cilium Identity allows introspection into security identities that
+            Cilium allocates which identify sets of labels that are assigned to
+            individual endpoints in the cluster.
+        - kind: CiliumEndpoint
+          version: v2
+          name: ciliumendpoints.cilium.io
+          displayName: Cilium Endpoint
+          description: |
+            Cilium Endpoint represents the status of individual pods or nodes in
+            the cluster which are managed by Cilium, including enforcement status,
+            IP addressing and whether the networking is successfully operational.
+        - kind: CiliumEndpointSlice
+          version: v2alpha1
+          name: ciliumendpointslices.cilium.io
+          displayName: Cilium Endpoint Slice
+          description: |
+            Cilium Endpoint Slice represents the status of groups of pods or nodes
+            in the cluster which are managed by Cilium, including enforcement status,
+            IP addressing and whether the networking is successfully operational.
+        - kind: CiliumEgressNATPolicy
+          version: v2alpha1
+          name: ciliumegressnatpolicies.cilium.io
+          displayName: Cilium Egress NAT Policy
+          description: |
+            Cilium Egress NAT Policy provides control over the way that traffic
+            leaves the cluster and which source addresses to use for that traffic.
+    apiVersion: v2
+    appVersion: 1.11.0
+    created: "2021-12-06T11:35:16-08:00"
+    description: eBPF-based Networking, Security, and Observability
+    digest: 25b2211f80e3f8ab9db7e6bf633f31cef57a7b33d7978b9da23ecd28b8bceb1c
+    home: https://cilium.io/
+    icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.11.0/Documentation/images/logo-solo.svg
+    keywords:
+    - BPF
+    - eBPF
+    - Kubernetes
+    - Networking
+    - Security
+    - Observability
+    - Troubleshooting
+    kubeVersion: '>= 1.16.0-0'
+    name: cilium
+    sources:
+    - https://github.com/cilium/cilium
+    urls:
+    - cilium-1.11.0.tgz
+    version: 1.11.0
+  - annotations:
+      artifacthub.io/crds: |
+        - kind: CiliumNetworkPolicy
+          version: v2
+          name: ciliumnetworkpolicy
+          displayName: Cilium Network Policy
+          description: |
+            Cilium Network Policies provide additional functionality beyond what
+            is provided by standard Kubernetes NetworkPolicy such as the ability
+            to allow traffic based on FQDNs, or to filter at Layer 7.
+        - kind: CiliumClusterwideNetworkPolicy
+          version: v2
+          name: ciliumclusterwidenetworkpolicy
+          displayName: Cilium Clusterwide Network Policy
+          description: |
+            Cilium Clusterwide Network Policies support configuring network traffic
+            policies across the entire cluster, including applying node firewalls.
+        - kind: CiliumExternalWorkload
+          version: v2
+          name: ciliumexternalworkload
+          displayName: Cilium External Workload
+          description: |
+            Cilium External Workload supports configuring the ability for external
+            non-Kubernetes workloads to join the cluster.
+        - kind: CiliumLocalRedirectPolicy
+          version: v2
+          name: ciliumlocalredirectpolicy
+          displayName: Cilium Local Redirect Policy
+          description: |
+            Cilium Local Redirect Policy allows local redirects to be configured
+            within a node to support use cases like Node-Local DNS or KIAM.
+        - kind: CiliumNode
+          version: v2
+          name: ciliumnode
+          displayName: Cilium Node
+          description: |
+            Cilium Node represents a node managed by Cilium. It contains a
+            specification to control various node specific configuration aspects
+            and a status section to represent the status of the node.
+        - kind: CiliumIdentity
+          version: v2
+          name: ciliumidentity
+          displayName: Cilium Identity
+          description: |
+            Cilium Identity allows introspection into security identities that
+            Cilium allocates which identify sets of labels that are assigned to
+            individual endpoints in the cluster.
+        - kind: CiliumEndpoint
+          version: v2
+          name: ciliumendpoint
+          displayName: Cilium Endpoint
+          description: |
+            Cilium Endpoint represents the status of individual pods or nodes in
+            the cluster which are managed by Cilium, including enforcement status,
+            IP addressing and whether the networking is successfully operational.
+    apiVersion: v2
+    appVersion: 1.11.0-rc3
+    created: "2021-11-23T03:15:39+01:00"
+    description: eBPF-based Networking, Security, and Observability
+    digest: 431a9ef4b7250c3b29d0ebe39f319e46a93916bdc505d30a4fe0f84bd3661da3
+    home: https://cilium.io/
+    icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.11.0-rc3/Documentation/images/logo-solo.svg
+    keywords:
+    - BPF
+    - eBPF
+    - Kubernetes
+    - Networking
+    - Security
+    - Observability
+    - Troubleshooting
+    kubeVersion: '>= 1.16.0-0'
+    name: cilium
+    sources:
+    - https://github.com/cilium/cilium
+    urls:
+    - cilium-1.11.0-rc3.tgz
+    version: 1.11.0-rc3
+  - annotations:
+      artifacthub.io/crds: |
+        - kind: CiliumNetworkPolicy
+          version: v2
+          name: ciliumnetworkpolicy
+          displayName: Cilium Network Policy
+          description: |
+            Cilium Network Policies provide additional functionality beyond what
+            is provided by standard Kubernetes NetworkPolicy such as the ability
+            to allow traffic based on FQDNs, or to filter at Layer 7.
+        - kind: CiliumClusterwideNetworkPolicy
+          version: v2
+          name: ciliumclusterwidenetworkpolicy
+          displayName: Cilium Clusterwide Network Policy
+          description: |
+            Cilium Clusterwide Network Policies support configuring network traffic
+            policies across the entire cluster, including applying node firewalls.
+        - kind: CiliumExternalWorkload
+          version: v2
+          name: ciliumexternalworkload
+          displayName: Cilium External Workload
+          description: |
+            Cilium External Workload supports configuring the ability for external
+            non-Kubernetes workloads to join the cluster.
+        - kind: CiliumLocalRedirectPolicy
+          version: v2
+          name: ciliumlocalredirectpolicy
+          displayName: Cilium Local Redirect Policy
+          description: |
+            Cilium Local Redirect Policy allows local redirects to be configured
+            within a node to support use cases like Node-Local DNS or KIAM.
+        - kind: CiliumNode
+          version: v2
+          name: ciliumnode
+          displayName: Cilium Node
+          description: |
+            Cilium Node represents a node managed by Cilium. It contains a
+            specification to control various node specific configuration aspects
+            and a status section to represent the status of the node.
+        - kind: CiliumIdentity
+          version: v2
+          name: ciliumidentity
+          displayName: Cilium Identity
+          description: |
+            Cilium Identity allows introspection into security identities that
+            Cilium allocates which identify sets of labels that are assigned to
+            individual endpoints in the cluster.
+        - kind: CiliumEndpoint
+          version: v2
+          name: ciliumendpoint
+          displayName: Cilium Endpoint
+          description: |
+            Cilium Endpoint represents the status of individual pods or nodes in
+            the cluster which are managed by Cilium, including enforcement status,
+            IP addressing and whether the networking is successfully operational.
+    apiVersion: v2
+    appVersion: 1.11.0-rc2
+    created: "2021-11-19T03:07:34+01:00"
+    description: eBPF-based Networking, Security, and Observability
+    digest: 89214ee65a86cec967fd599f4652831db3fe7e60f375465c0131db561611c089
+    home: https://cilium.io/
+    icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.11.0-rc2/Documentation/images/logo-solo.svg
+    keywords:
+    - BPF
+    - eBPF
+    - Kubernetes
+    - Networking
+    - Security
+    - Observability
+    - Troubleshooting
+    kubeVersion: '>= 1.16.0-0'
+    name: cilium
+    sources:
+    - https://github.com/cilium/cilium
+    urls:
+    - cilium-1.11.0-rc2.tgz
+    version: 1.11.0-rc2
+  - annotations:
+      artifacthub.io/crds: |
+        - kind: CiliumNetworkPolicy
+          version: v2
+          name: ciliumnetworkpolicy
+          displayName: Cilium Network Policy
+          description: |
+            Cilium Network Policies provide additional functionality beyond what
+            is provided by standard Kubernetes NetworkPolicy such as the ability
+            to allow traffic based on FQDNs, or to filter at Layer 7.
+        - kind: CiliumClusterwideNetworkPolicy
+          version: v2
+          name: ciliumclusterwidenetworkpolicy
+          displayName: Cilium Clusterwide Network Policy
+          description: |
+            Cilium Clusterwide Network Policies support configuring network traffic
+            policies across the entire cluster, including applying node firewalls.
+        - kind: CiliumExternalWorkload
+          version: v2
+          name: ciliumexternalworkload
+          displayName: Cilium External Workload
+          description: |
+            Cilium External Workload supports configuring the ability for external
+            non-Kubernetes workloads to join the cluster.
+        - kind: CiliumLocalRedirectPolicy
+          version: v2
+          name: ciliumlocalredirectpolicy
+          displayName: Cilium Local Redirect Policy
+          description: |
+            Cilium Local Redirect Policy allows local redirects to be configured
+            within a node to support use cases like Node-Local DNS or KIAM.
+        - kind: CiliumNode
+          version: v2
+          name: ciliumnode
+          displayName: Cilium Node
+          description: |
+            Cilium Node represents a node managed by Cilium. It contains a
+            specification to control various node specific configuration aspects
+            and a status section to represent the status of the node.
+        - kind: CiliumIdentity
+          version: v2
+          name: ciliumidentity
+          displayName: Cilium Identity
+          description: |
+            Cilium Identity allows introspection into security identities that
+            Cilium allocates which identify sets of labels that are assigned to
+            individual endpoints in the cluster.
+        - kind: CiliumEndpoint
+          version: v2
+          name: ciliumendpoint
+          displayName: Cilium Endpoint
+          description: |
+            Cilium Endpoint represents the status of individual pods or nodes in
+            the cluster which are managed by Cilium, including enforcement status,
+            IP addressing and whether the networking is successfully operational.
+ apiVersion: v2 + appVersion: 1.11.0-rc1 + created: "2021-11-15T16:41:47+01:00" + description: eBPF-based Networking, Security, and Observability + digest: 0f4445d9ad16342a64eff893ddea5adcc874e0d58553093b0c21ccc5e12a7c64 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.11.0-rc1/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.11.0-rc1.tgz + version: 1.11.0-rc1 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.11.0-rc0 + created: "2021-09-30T17:12:33-07:00" + description: eBPF-based Networking, Security, and Observability + digest: 4fae7ba0900ce64fd5e650c1333316f054780473806bd78897dcdc373660d219 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.11.0-rc0/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.11.0-rc0.tgz + version: 1.11.0-rc0 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.7 + created: "2022-01-18T18:00:14-08:00" + description: eBPF-based Networking, Security, and Observability + digest: a0ba968a349c9fdb0fa728aad35fecd55a3565d5af1725ebbb965739ecdf5585 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.7/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.7.tgz + version: 1.10.7 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.6 + created: "2021-12-10T10:18:21-08:00" + description: eBPF-based Networking, Security, and Observability + digest: fb97f0c77c63a250df50e835bd850231f4b084634436cd663b248853cb02a645 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.6/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.6.tgz + version: 1.10.6 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.5 + created: "2021-10-13T18:35:30-07:00" + description: eBPF-based Networking, Security, and Observability + digest: 0cc2da702807aadaec35c7e13c034d5ce80f5e6a3ccf21c423c39cbb895370bd + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.5/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.5.tgz + version: 1.10.5 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.4 + created: "2021-09-02T17:36:50-07:00" + description: eBPF-based Networking, Security, and Observability + digest: 9d42dac3d85b934a20195822c3e24eee62cc795789363a5b4158946c0d225e76 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.4/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.4.tgz + version: 1.10.4 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.3 + created: "2021-07-15T18:31:06+02:00" + description: eBPF-based Networking, Security, and Observability + digest: 3b62d18aa71913fd61ee10251f99a9b17c6e899154fe6f060a51676d3bfffa09 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.3/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.3.tgz + version: 1.10.3 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.2 + created: "2021-07-02T16:55:40+02:00" + description: eBPF-based Networking, Security, and Observability + digest: a36e66b107d7149fb31ad85cdeb11c1f28f628df91183ef88cade02875d8d00b + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.2/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.2.tgz + version: 1.10.2 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.1 + created: "2021-06-16T13:31:12+02:00" + description: eBPF-based Networking, Security, and Observability + digest: 04cbfc6294d828b4dffc72950656ff7e6a5159cd092ab3e5f645adecbc81b73e + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.1/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.1.tgz + version: 1.10.1 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.0 + created: "2021-05-20T15:27:01+02:00" + description: eBPF-based Networking, Security, and Observability + digest: 18b48dd562f488516ec52c7df2dd8b48998761e5a6f06c3940f597a170e38f23 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.0/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.0.tgz + version: 1.10.0 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.0-rc2 + created: "2021-05-17T15:52:03+02:00" + description: eBPF-based Networking, Security, and Observability + digest: fa874d786ec2e96d366af79a1c49b6d80541f5b792a3618f1886ab3987124eed + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.0-rc2/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.0-rc2.tgz + version: 1.10.0-rc2 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.0-rc1 + created: "2021-04-28T12:52:38+02:00" + description: eBPF-based Networking, Security, and Observability + digest: 3b2e52ee75077a721e9f9c8b405594597b21703718f5c50322c6042fa6d80576 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.0-rc1/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.16.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.0-rc1.tgz + version: 1.10.0-rc1 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.10.0-rc0 + created: "2021-03-11T22:45:19+01:00" + description: eBPF-based Networking, Security, and Observability + digest: 16da50ed7d405e2f5fc0728a765038812613b8eb98fe33da82b3ef952cf52faf + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.10.0-rc0/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.13.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.10.0-rc0.tgz + version: 1.10.0-rc0 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.9.12 + created: "2022-01-18T17:59:36-08:00" + description: eBPF-based Networking, Security, and Observability + digest: 65402502bcefd9f2d69cf8c8141fc2713c16229b147eb7b78e7d1df20bc6b799 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.12/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.12.tgz + version: 1.9.12 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.9.11 + created: "2021-11-05T16:42:40-07:00" + description: eBPF-based Networking, Security, and Observability + digest: 66765c838d7e849cc32d8ec041941c46c75b3cf58ab68f822c24c7452349d4f0 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.11/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.11.tgz + version: 1.9.11 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.9.10 + created: "2021-09-02T17:36:38-07:00" + description: eBPF-based Networking, Security, and Observability + digest: 4f7423c8cd8bd5338f29cfad6f096b5410f11a1a1625254781d0aa2cb3855638 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.10/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.10.tgz + version: 1.9.10 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.9.9 + created: "2021-07-19T17:20:26-07:00" + description: eBPF-based Networking, Security, and Observability + digest: 81da8f9e707308a69d37a143d31fca7c4b2fc8aad4a9d1a98150ee8b6b1f9891 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.9/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.9.tgz + version: 1.9.9 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.9.8 + created: "2021-05-28T14:28:16+02:00" + description: eBPF-based Networking, Security, and Observability + digest: bf8f5a4109d2434428ce15051ced4499ff7905aa00c4aa59ae71cab61b01d38d + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.8/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.8.tgz + version: 1.9.8 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.9.7 + created: "2021-05-17T09:50:57-07:00" + description: eBPF-based Networking, Security, and Observability + digest: 7ebaf404beee57d4f9d67afaa04b1e25c45f87b36c4aa15c828a0e74585b811f + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.7/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.7.tgz + version: 1.9.7 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policiies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is succesfully operational. 
+ apiVersion: v2 + appVersion: 1.9.6 + created: "2021-04-20T16:57:09-07:00" + description: eBPF-based Networking, Security, and Observability + digest: f4ada59672ccb7c4bc7520e0c2610416de8b705ae26c169fcba0769ca51671c0 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.6/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.6.tgz + version: 1.9.6 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumexternalworkload + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is successfully operational.
+ apiVersion: v2 + appVersion: 1.9.5 + created: "2021-03-10T16:40:50-08:00" + description: eBPF-based Networking, Security, and Observability + digest: 9ff015652bc0a992cc45921016487fa4aac9e5ebd3c4ebcfa2e6e7cc6d9b5ffe + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.5/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.5.tgz + version: 1.9.5 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumexternalworkload + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is successfully operational.
+ apiVersion: v2 + appVersion: 1.9.4 + created: "2021-02-03T16:18:07-08:00" + description: eBPF-based Networking, Security, and Observability + digest: 28fa00f5a56b943ea060c22a63cbce9f52400ebef868156c5367a7ca0ae95f61 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.4/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.4.tgz + version: 1.9.4 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumexternalworkload + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is successfully operational.
+ apiVersion: v2 + appVersion: 1.9.3 + created: "2021-01-22T20:58:32+01:00" + description: eBPF-based Networking, Security, and Observability + digest: 8ec89c3d31d0d54841996c6a161be7b4cf7dba824dd671b28a58cb11e2cf4a6e + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.3/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.3.tgz + version: 1.9.3 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumexternalworkload + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is successfully operational.
+ apiVersion: v2 + appVersion: 1.9.2 + created: "2021-01-20T16:16:56+01:00" + description: eBPF-based Networking, Security, and Observability + digest: 93cd87ed5d0ee6bcacbbafc31f8980df7cf478dcc3e57b66dabcd0b4eff357f3 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.2/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.2.tgz + version: 1.9.2 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumexternalworkload + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is successfully operational.
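The artifacthub.io/crds annotation repeated in the 1.9.x entries is itself a YAML list embedded as a string value, so any consumer wanting the CRD summaries has to decode it a second time. A rough sketch of that decode using sigs.k8s.io/yaml; the crdEntry struct is illustrative, mirroring only the keys visible above, and the inlined string stands in for a value read from a ChartVersion's Annotations map:

package main

import (
	"fmt"
	"log"

	"sigs.k8s.io/yaml"
)

// crdEntry mirrors one item of the artifacthub.io/crds list. It is a
// hypothetical helper for this sketch, not a published schema type.
type crdEntry struct {
	Kind        string `json:"kind"`
	Version     string `json:"version"`
	Name        string `json:"name"`
	DisplayName string `json:"displayName"`
	Description string `json:"description"`
}

func main() {
	// In practice this string would come from cv.Annotations["artifacthub.io/crds"];
	// one entry is inlined here so the example runs standalone.
	raw := `
- kind: CiliumNetworkPolicy
  version: v2
  name: ciliumnetworkpolicy
  displayName: Cilium Network Policy
`
	var entries []crdEntry
	// sigs.k8s.io/yaml converts YAML to JSON before unmarshaling,
	// which is why the struct uses json tags.
	if err := yaml.Unmarshal([]byte(raw), &entries); err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Printf("%s: %s (%s)\n", e.Kind, e.DisplayName, e.Version)
	}
}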
+ apiVersion: v2 + appVersion: 1.9.1 + created: "2020-12-04T18:50:03+01:00" + description: eBPF-based Networking, Security, and Observability + digest: 079e55fcd4bdf5e66eb8ffff221d138c0e203c2ec32220b7ac49e477239fa851 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9.1/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.1.tgz + version: 1.9.1 + - apiVersion: v2 + appVersion: 1.9.0 + created: "2020-11-10T17:16:14+01:00" + description: Helm chart for Cilium + digest: 7aaa0ccc7ca58ecaa72f063be7553799cdc9357c7f96de2bdf052d2d551385ec + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.0.tgz + version: 1.9.0 + - apiVersion: v2 + appVersion: 1.9.0-rc3 + created: "2020-11-02T16:47:36-08:00" + description: Helm chart for Cilium + digest: b590c586d184ecd431d34a741cb61604cc58677f1835fa709ccc3f89c7b15226 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.0-rc3.tgz + version: 1.9.0-rc3 + - apiVersion: v2 + appVersion: 1.9.0-rc2 + created: "2020-10-19T16:44:31+02:00" + description: Helm chart for Cilium + digest: a6ea50230974af6d4661b0ab5fb5b6e9f19df6c981ba2b3341f7b6f03fe660a1 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.0-rc2.tgz + version: 1.9.0-rc2 + - apiVersion: v2 + appVersion: 1.9.0-rc1 + created: "2020-10-12T18:49:21-07:00" + description: Helm chart for Cilium + digest: 8283ec0b063dd777d479045809994718f853628e9fcef24a36ccc2e6b8bd376c + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.0-rc1.tgz + version: 1.9.0-rc1 + - apiVersion: v2 + appVersion: 1.9.0-rc0 + created: "2020-08-18T22:28:07+02:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.9.0-rc0 + - condition: config.enabled + name: config + repository: "" + version: 1.9.0-rc0 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.9.0-rc0 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.9.0-rc0 + - condition: operator.enabled + name: operator + repository: "" + version: 1.9.0-rc0 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.9.0-rc0 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.9.0-rc0 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.9.0-rc0 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.9.0-rc0 + description: Helm chart for Cilium + digest: 63db71234456ac656cb5c15dbea89673717e4c81088baeb848e708f33d24835c + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9.0-rc0.tgz + version: 1.9.0-rc0 + - annotations: + artifacthub.io/crds: | + - kind: CiliumNetworkPolicy + version: v2 + name: ciliumnetworkpolicy + displayName: Cilium Network Policy + description: | + Cilium Network Policies provide additional functionality beyond what + is provided by standard Kubernetes NetworkPolicy such as the ability + to allow traffic based on FQDNs, or to 
filter at Layer 7. + - kind: CiliumClusterwideNetworkPolicy + version: v2 + name: ciliumclusterwidenetworkpolicy + displayName: Cilium Clusterwide Network Policy + description: | + Cilium Clusterwide Network Policies support configuring network traffic + policies across the entire cluster, including applying node firewalls. + - kind: CiliumExternalWorkload + version: v2 + name: ciliumexternalworkload + displayName: Cilium External Workload + description: | + Cilium External Workload supports configuring the ability for external + non-Kubernetes workloads to join the cluster. + - kind: CiliumLocalRedirectPolicy + version: v2 + name: ciliumlocalredirectpolicy + displayName: Cilium Local Redirect Policy + description: | + Cilium Local Redirect Policy allows local redirects to be configured + within a node to support use cases like Node-Local DNS or KIAM. + - kind: CiliumNode + version: v2 + name: ciliumnode + displayName: Cilium Node + description: | + Cilium Node represents a node managed by Cilium. It contains a + specification to control various node specific configuration aspects + and a status section to represent the status of the node. + - kind: CiliumIdentity + version: v2 + name: ciliumidentity + displayName: Cilium Identity + description: | + Cilium Identity allows introspection into security identities that + Cilium allocates which identify sets of labels that are assigned to + individual endpoints in the cluster. + - kind: CiliumEndpoint + version: v2 + name: ciliumendpoint + displayName: Cilium Endpoint + description: | + Cilium Endpoint represents the status of individual pods or nodes in + the cluster which are managed by Cilium, including enforcement status, + IP addressing and whether the networking is successfully operational. + apiVersion: v2 + appVersion: 1.9-dev + created: "2020-12-04T18:50:45+01:00" + description: eBPF-based Networking, Security, and Observability + digest: e244a7e42419aa4663e7d48e26bf72b232cfdcb0306e403cb91b197f8f696fb8 + home: https://cilium.io/ + icon: https://cdn.jsdelivr.net/gh/cilium/cilium@v1.9/Documentation/images/logo-solo.svg + keywords: + - BPF + - eBPF + - Kubernetes + - Networking + - Security + - Observability + - Troubleshooting + kubeVersion: '>= 1.12.0-0' + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.9-dev.tgz + version: 1.9-dev + - apiVersion: v1 + appVersion: 1.8.13 + created: "2021-11-05T16:41:52-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.13 + - condition: config.enabled + name: config + repository: "" + version: 1.8.13 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.13 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.13 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.13 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.13 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.13 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.13 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.13 + description: Helm chart for Cilium + digest: cce4af63912ce41fc7c3a919257ebec21ec067b13113a1f6db079836f71330c4 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.13.tgz +
version: 1.8.13 + - apiVersion: v1 + appVersion: 1.8.12 + created: "2021-09-02T17:36:13-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.12 + - condition: config.enabled + name: config + repository: "" + version: 1.8.12 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.12 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.12 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.12 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.12 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.12 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.12 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.12 + description: Helm chart for Cilium + digest: 047c1be689dbfa94e0f3d3ca3f3a6c38ea3fe569a3716502f5d2733e499bcbac + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.12.tgz + version: 1.8.12 + - apiVersion: v1 + appVersion: 1.8.11 + created: "2021-07-23T10:31:18-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.11 + - condition: config.enabled + name: config + repository: "" + version: 1.8.11 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.11 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.11 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.11 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.11 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.11 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.11 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.11 + description: Helm chart for Cilium + digest: ba6127f642ba7ccad55a24d581d46e22ebbb503232a1bf40a0660ad3b60d3727 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.11.tgz + version: 1.8.11 + - apiVersion: v1 + appVersion: 1.8.10 + created: "2021-05-17T09:45:14-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.10 + - condition: config.enabled + name: config + repository: "" + version: 1.8.10 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.10 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.10 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.10 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.10 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.10 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.10 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.10 + description: Helm chart for Cilium + digest: 47f97a29a842125e3423722be8e40a8104c77eadc5b0b652ea4a50af9142d8a8 + home: https://cilium.io/ + name: cilium + sources: + - 
https://github.com/cilium/cilium + urls: + - cilium-1.8.10.tgz + version: 1.8.10 + - apiVersion: v1 + appVersion: 1.8.9 + created: "2021-04-20T16:56:07-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.9 + - condition: config.enabled + name: config + repository: "" + version: 1.8.9 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.9 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.9 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.9 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.9 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.9 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.9 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.9 + description: Helm chart for Cilium + digest: 63d2ade53017472c36e0addfb7ae0fd63f43a521fa8b5ae2d6791b4c9b2cd6f0 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.9.tgz + version: 1.8.9 + - apiVersion: v1 + appVersion: 1.8.8 + created: "2021-03-10T12:07:26-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.8 + - condition: config.enabled + name: config + repository: "" + version: 1.8.8 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.8 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.8 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.8 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.8 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.8 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.8 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.8 + description: Helm chart for Cilium + digest: 3b453c710bd3e78c821f8a8793edeeb86a2f2497ed02b06bcef6d9adddcd8591 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.8.tgz + version: 1.8.8 + - apiVersion: v1 + appVersion: 1.8.7 + created: "2021-02-17T16:52:29-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.7 + - condition: config.enabled + name: config + repository: "" + version: 1.8.7 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.7 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.7 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.7 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.7 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.7 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.7 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.7 + description: Helm chart for Cilium + digest: f72feeb6afdb9bca8f69aac5ce50665e3348ae790632c66975d186968037d4ed + home: https://cilium.io/ + name: cilium + 
sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.7.tgz + version: 1.8.7 + - apiVersion: v1 + appVersion: 1.8.6 + created: "2020-12-04T18:32:13+01:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.6 + - condition: config.enabled + name: config + repository: "" + version: 1.8.6 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.6 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.6 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.6 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.6 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.6 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.6 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.6 + description: Helm chart for Cilium + digest: e617123f6d9e4a9a023defdc7001a4e9353b4d2698a99a962c1524fe204d43b3 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.6.tgz + version: 1.8.6 + - apiVersion: v1 + appVersion: 1.8.5 + created: "2020-10-28T16:30:24-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.5 + - condition: config.enabled + name: config + repository: "" + version: 1.8.5 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.5 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.5 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.5 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.5 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.5 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.5 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.5 + description: Helm chart for Cilium + digest: 300c8c60868f9d049ef7b7d4cdd994044db3726adfde4113d5e89728f55cc8ac + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.5.tgz + version: 1.8.5 + - apiVersion: v1 + appVersion: 1.8.4 + created: "2020-09-30T17:51:09-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.4 + - condition: config.enabled + name: config + repository: "" + version: 1.8.4 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.4 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.4 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.4 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.4 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.4 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.4 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.4 + description: Helm chart for Cilium + digest: 413e40e7f64be9861dceaceae8725b8ecacc87bb22bafc36c069b0ce2d3c0422 + home: https://cilium.io/ + name: 
cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.4.tgz + version: 1.8.4 + - apiVersion: v1 + appVersion: 1.8.3 + created: "2020-09-04T15:09:39+02:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.3 + - condition: config.enabled + name: config + repository: "" + version: 1.8.3 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.3 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.3 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.3 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.3 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.3 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.3 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.3 + description: Helm chart for Cilium + digest: edc92f121bc5b12b8cf9c885eea8552780a3a732e6ca1a27c28aa9eb5e6e8968 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.3.tgz + version: 1.8.3 + - apiVersion: v1 + appVersion: 1.8.2 + created: "2020-07-23T16:13:58-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.2 + - condition: config.enabled + name: config + repository: "" + version: 1.8.2 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.2 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.2 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.2 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.2 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.2 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.2 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.2 + description: Helm chart for Cilium + digest: 548e076dd195556faa8115946cf484f1b9ade71b08865ef47bf13b1488296f51 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.2.tgz + version: 1.8.2 + - apiVersion: v1 + appVersion: 1.8.1 + created: "2020-07-02T21:45:34+02:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.1 + - condition: config.enabled + name: config + repository: "" + version: 1.8.1 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.1 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.1 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.1 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.1 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.1 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.1 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.1 + description: Helm chart for Cilium + digest: c72ab1f14806280b1ff8cec6be96aa7ac053898fb4203bacd5b7b782050e4c45 + home: https://cilium.io/ + 
name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.1.tgz + version: 1.8.1 + - apiVersion: v1 + appVersion: 1.8.0 + created: "2020-06-22T17:52:19+02:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.0 + - condition: config.enabled + name: config + repository: "" + version: 1.8.0 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.0 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.0 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.0 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.0 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.0 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.0 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.0 + description: Helm chart for Cilium + digest: 7f2f7a2cff735799daf6c804ab7fb439d3c1ffa8b82f490a04f7eedf7e19bbfb + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.0.tgz + version: 1.8.0 + - apiVersion: v1 + appVersion: 1.8.0-rc4 + created: "2020-06-19T13:10:57-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.0-rc4 + - condition: config.enabled + name: config + repository: "" + version: 1.8.0-rc4 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.0-rc4 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.0-rc4 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.0-rc4 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.0-rc4 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.0-rc4 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.0-rc4 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.0-rc4 + description: Helm chart for Cilium + digest: f5e5ca6620006b600ed2687e8bea62af4e62dda288cb511b91aa7f388b1d662a + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.0-rc4.tgz + version: 1.8.0-rc4 + - apiVersion: v1 + appVersion: 1.8.0-rc3 + created: "2020-06-15T17:14:52+02:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.0-rc3 + - condition: config.enabled + name: config + repository: "" + version: 1.8.0-rc3 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.0-rc3 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.0-rc3 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.0-rc3 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.0-rc3 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.0-rc3 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.0-rc3 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.0-rc3 + description: Helm chart for Cilium + digest: 
a8b097d8ba1a60c09288750d4a3c0ac3afaae5706438df7e39f864c0939fbd4e + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.0-rc3.tgz + version: 1.8.0-rc3 + - apiVersion: v1 + appVersion: 1.8.0-rc2 + created: "2020-05-29T15:59:58-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.0-rc2 + - condition: config.enabled + name: config + repository: "" + version: 1.8.0-rc2 + - condition: global.hubble.cli.enabled + name: hubble-cli + repository: "" + version: 1.8.0-rc2 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.0-rc2 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.0-rc2 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.0-rc2 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.0-rc2 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.0-rc2 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.0-rc2 + - condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8.0-rc2 + description: Helm chart for Cilium + digest: 999b1e9c2df40c37841d0e8ef00bd7ed49bee69e070da288f3dbf77c77dbb353 + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.0-rc2.tgz + version: 1.8.0-rc2 + - apiVersion: v1 + appVersion: 1.8.0-rc1 + created: "2020-05-21T15:03:34-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8.0-rc1 + - condition: config.enabled + name: config + repository: "" + version: 1.8.0-rc1 + - condition: global.hubble.cli.enabled + name: hubble-cli + repository: "" + version: 1.8.0-rc1 + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8.0-rc1 + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8.0-rc1 + - condition: operator.enabled + name: operator + repository: "" + version: 1.8.0-rc1 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8.0-rc1 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8.0-rc1 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8.0-rc1 + description: Helm chart for Cilium + digest: bc14119e24eaa2df8949a58b0bc26bbceb2dc0b5bbab43b25574022c5c06a9ed + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8.0-rc1.tgz + version: 1.8.0-rc1 + - apiVersion: v1 + appVersion: 1.8-dev + created: "2020-06-22T18:37:46+02:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.8-dev + - condition: config.enabled + name: config + repository: "" + version: 1.8-dev + - condition: global.hubble.relay.enabled + name: hubble-relay + repository: "" + version: 1.8-dev + - condition: global.hubble.ui.enabled + name: hubble-ui + repository: "" + version: 1.8-dev + - condition: operator.enabled + name: operator + repository: "" + version: 1.8-dev + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.8-dev + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.8-dev + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.8-dev + - 
condition: global.prometheus.enabled && global.prometheus.serviceMonitor.enabled + name: prometheus-crd + repository: "" + version: 1.8-dev + description: Helm chart for Cilium + digest: c983eb0af7eb6d3fbfb5d94c5954ca91db853b359173c7932e44eb1c3b4de45d + home: https://cilium.io/ + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.8-dev.tgz + version: 1.8-dev + - apiVersion: v1 + appVersion: 1.7.16 + created: "2021-04-20T16:49:47-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.16 + - condition: config.enabled + name: config + repository: "" + version: 1.7.16 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.16 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.16 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.16 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.16 + description: Helm chart for Cilium + digest: 9204375c38a8074c5b6d0623bdf720025beeb52f25d54bc736705a10e393c769 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.16.tgz + version: 1.7.16 + - apiVersion: v1 + appVersion: 1.7.15 + created: "2021-03-10T10:14:18-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.15 + - condition: config.enabled + name: config + repository: "" + version: 1.7.15 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.15 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.15 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.15 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.15 + description: Helm chart for Cilium + digest: 1de65baf4e3a217318dabfcc419a2caeb7b669d292fe6caf87bc214f4abae529 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.15.tgz + version: 1.7.15 + - apiVersion: v1 + appVersion: 1.7.14 + created: "2021-02-25T14:51:11-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.14 + - condition: config.enabled + name: config + repository: "" + version: 1.7.14 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.14 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.14 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.14 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.14 + description: Helm chart for Cilium + digest: fac25151481c997b7d130612fbaea6c26f79e5983272541f720f4d8682bdca81 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.14.tgz + version: 1.7.14 + - apiVersion: v1 + appVersion: 1.7.13 + created: "2021-01-27T14:58:32-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.13 + - condition: config.enabled + name: config + repository: "" + version: 1.7.13 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.13 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.13 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.13 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.13 + description: Helm chart for Cilium + 
digest: 716cea4afa9ba1956eaa51af7e3694d736fbebc96af3dce00141bd3efd44b91a + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.13.tgz + version: 1.7.13 + - apiVersion: v1 + appVersion: 1.7.12 + created: "2020-12-04T12:56:11+01:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.12 + - condition: config.enabled + name: config + repository: "" + version: 1.7.12 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.12 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.12 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.12 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.12 + description: Helm chart for Cilium + digest: 56c8c1292802b76e19b62526acd1702ef655ad71fcafdae0d1328ffe5a8d7cbf + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.12.tgz + version: 1.7.12 + - apiVersion: v1 + appVersion: 1.7.11 + created: "2020-10-27T18:55:05-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.11 + - condition: config.enabled + name: config + repository: "" + version: 1.7.11 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.11 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.11 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.11 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.11 + description: Helm chart for Cilium + digest: 7a4b2f7970aeff7c0201eb8e7ce8a75c16ec53af79ff609de8ec087ad59d63a1 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.11.tgz + version: 1.7.11 + - apiVersion: v1 + appVersion: 1.7.10 + created: "2020-09-30T17:01:58-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.10 + - condition: config.enabled + name: config + repository: "" + version: 1.7.10 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.10 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.10 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.10 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.10 + description: Helm chart for Cilium + digest: dfd2d4f728aae6eb8b64243412ae20864e7d7da3afe09d2c38ddd04d070055dd + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.10.tgz + version: 1.7.10 + - apiVersion: v1 + appVersion: 1.7.9 + created: "2020-09-01T20:25:45-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.9 + - condition: config.enabled + name: config + repository: "" + version: 1.7.9 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.9 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.9 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.9 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.9 + description: Helm chart for Cilium + digest: 23fb1a429a64a7ea29d29f6ce0b00886f9a1e2f6c3c76b920944ba8ab800d6dd + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.9.tgz + version: 1.7.9 + - apiVersion: v1 + 
appVersion: 1.7.8 + created: "2020-08-28T13:38:04-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.8 + - condition: config.enabled + name: config + repository: "" + version: 1.7.8 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.8 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.8 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.8 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.8 + description: Helm chart for Cilium + digest: 29011b46577f16ab636b685e72ff38db4f66807f29b7373156c1f46d8a8bc7a0 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.8.tgz + version: 1.7.8 + - apiVersion: v1 + appVersion: 1.7.7 + created: "2020-08-04T18:44:45-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.7 + - condition: config.enabled + name: config + repository: "" + version: 1.7.7 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.7 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.7 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.7 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.7 + description: Helm chart for Cilium + digest: 6309bc42ebcb394f4abb2e84a00f0f1b72c7086d07925eb7c1e406e056b0d527 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.7.tgz + version: 1.7.7 + - apiVersion: v1 + appVersion: 1.7.6 + created: "2020-07-02T14:58:08-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.6 + - condition: config.enabled + name: config + repository: "" + version: 1.7.6 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.6 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.6 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.6 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.6 + description: Helm chart for Cilium + digest: 14bf5c70fed0c9bd49adb4e6d7187bc4502f38afc8d6cf734014600b63527839 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.6.tgz + version: 1.7.6 + - apiVersion: v1 + appVersion: 1.7.5 + created: "2020-06-12T15:00:23+02:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.5 + - condition: config.enabled + name: config + repository: "" + version: 1.7.5 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.5 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.5 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.5 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.5 + description: Helm chart for Cilium + digest: 17ad591bbbdcfa8c90db66dca34a79b7bde96f004d6f573524e399e2d88baa5a + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.5.tgz + version: 1.7.5 + - apiVersion: v1 + appVersion: 1.7.4 + created: "2020-05-15T15:12:07-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.4 + - condition: config.enabled + name: config + repository: "" + version: 1.7.4 + - condition: 
operator.enabled + name: operator + repository: "" + version: 1.7.4 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.4 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.4 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.4 + description: Helm chart for Cilium + digest: ea67198ed253ea337831b46705b712bf34fcf1658bd5c4637208186f235c2218 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.4.tgz + version: 1.7.4 + - apiVersion: v1 + appVersion: 1.7.3 + created: "2020-04-29T16:14:35-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.3 + - condition: config.enabled + name: config + repository: "" + version: 1.7.3 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.3 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.3 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.3 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.3 + description: Helm chart for Cilium + digest: 19407689436278319dff249ffa521a7df11f91e187f21e4dfab482bea5d242dc + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.3.tgz + version: 1.7.3 + - apiVersion: v1 + appVersion: 1.7.2 + created: "2020-04-03T12:28:11-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.2 + - condition: config.enabled + name: config + repository: "" + version: 1.7.2 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.2 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.2 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.2 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.2 + description: Helm chart for Cilium + digest: 42379329de4a22bc4f260760b39671f64ee43f99eec38575251b0b3ab45f1395 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.2.tgz + version: 1.7.2 + - apiVersion: v1 + appVersion: 1.7.1 + created: "2020-03-04T15:52:49-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.1 + - condition: config.enabled + name: config + repository: "" + version: 1.7.1 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.1 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.1 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.1 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.1 + description: Helm chart for Cilium + digest: 7de9a307040a2d0f91e3bdf7f0920a1a8b601734b2b6d3652ff66dab51ce4a35 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.1.tgz + version: 1.7.1 + - apiVersion: v1 + appVersion: 1.7.0 + created: "2020-02-18T17:31:32-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.0 + - condition: config.enabled + name: config + repository: "" + version: 1.7.0 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.0 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.0 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.0 + - 
condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.0 + description: Helm chart for Cilium + digest: eec8d07eec1e680c2e87cc46bf188b0ad2ab9d13486e7d263ea21d15e94f96d2 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.0.tgz + version: 1.7.0 + - apiVersion: v1 + appVersion: 1.7.0-rc4 + created: "2020-02-12T16:31:15-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.0-rc4 + - condition: config.enabled + name: config + repository: "" + version: 1.7.0-rc4 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.0-rc4 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.0-rc4 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.0-rc4 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.0-rc4 + description: Helm chart for Cilium + digest: 4e27fc657999f8e1ff68315d563d5a9454cd0f76bdb8a3abbf06e9e0030b0612 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.0-rc4.tgz + version: 1.7.0-rc4 + - apiVersion: v1 + appVersion: 1.7.0-rc3 + created: "2020-02-04T11:22:10-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7.0-rc3 + - condition: config.enabled + name: config + repository: "" + version: 1.7.0-rc3 + - condition: operator.enabled + name: operator + repository: "" + version: 1.7.0-rc3 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7.0-rc3 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7.0-rc3 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7.0-rc3 + description: Helm chart for Cilium + digest: 931c4d32f2cba0e8a49a3690a48a9459d8cab0b572fb914998164a60c2fe48b3 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7.0-rc3.tgz + version: 1.7.0-rc3 + - apiVersion: v1 + appVersion: 1.7-dev + created: "2020-02-19T09:16:25+01:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.7-dev + - condition: config.enabled + name: config + repository: "" + version: 1.7-dev + - condition: operator.enabled + name: operator + repository: "" + version: 1.7-dev + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.7-dev + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.7-dev + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.7-dev + description: Helm chart for Cilium + digest: c7637bb01c5118d7f989a9d8710fcaa0ac48374141291c8cccf68e627cbac720 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.7-dev.tgz + version: 1.7-dev + - apiVersion: v1 + appVersion: 1.6.12 + created: "2020-09-30T17:01:49-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6.12 + - condition: config.enabled + name: config + repository: "" + version: 1.6.12 + - condition: operator.enabled + name: operator + repository: "" + version: 1.6.12 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6.12 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6.12 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6.12 + description: Helm chart for Cilium + digest: 
46bfb182c186be5a34d428b7c7e1b9615d55385aeb66da0517b3990317150c4e + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6.12.tgz + version: 1.6.12 + - apiVersion: v1 + appVersion: 1.6.11 + created: "2020-08-03T16:53:07-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6.11 + - condition: config.enabled + name: config + repository: "" + version: 1.6.11 + - condition: operator.enabled + name: operator + repository: "" + version: 1.6.11 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6.11 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6.11 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6.11 + description: Helm chart for Cilium + digest: e53e95552cc59e6ae926f51382c081a484ec03dc70b88f274619ce7778390066 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6.11.tgz + version: 1.6.11 + - apiVersion: v1 + appVersion: 1.6.10 + created: "2020-07-02T20:16:39+02:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6.10 + - condition: config.enabled + name: config + repository: "" + version: 1.6.10 + - condition: operator.enabled + name: operator + repository: "" + version: 1.6.10 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6.10 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6.10 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6.10 + description: Helm chart for Cilium + digest: 59f42143afd725c75eec7198691f00a717e4602c773d38080526ef56a6a45693 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6.10.tgz + version: 1.6.10 + - apiVersion: v1 + appVersion: 1.6.9 + created: "2020-06-05T16:58:12-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6.9 + - condition: config.enabled + name: config + repository: "" + version: 1.6.9 + - condition: operator.enabled + name: operator + repository: "" + version: 1.6.9 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6.9 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6.9 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6.9 + description: Helm chart for Cilium + digest: 7596d3ca7c0f74c7942a5299f968794267a9988513b72224df4759d71a5e1218 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6.9.tgz + version: 1.6.9 + - apiVersion: v1 + appVersion: 1.6.8 + created: "2020-03-25T15:02:28-07:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6.8 + - condition: config.enabled + name: config + repository: "" + version: 1.6.8 + - condition: operator.enabled + name: operator + repository: "" + version: 1.6.8 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6.8 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6.8 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6.8 + description: Helm chart for Cilium + digest: 408ac5e7b1e465f696721d2c341a02f28bf788264ca773010a464ab706c9d4b4 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6.8.tgz + version: 1.6.8 + - apiVersion: v1 + appVersion: 1.6.7 + 
created: "2020-03-04T15:08:04-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6.7 + - condition: config.enabled + name: config + repository: "" + version: 1.6.7 + - condition: operator.enabled + name: operator + repository: "" + version: 1.6.7 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6.7 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6.7 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6.7 + description: Helm chart for Cilium + digest: d11fa9a2ee682b8202881eede2dd902236b474caf2f128f566cd4738405aafe0 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6.7.tgz + version: 1.6.7 + - apiVersion: v1 + appVersion: 1.6.6 + created: "2020-02-03T15:14:02-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6.6 + - condition: config.enabled + name: config + repository: "" + version: 1.6.6 + - condition: operator.enabled + name: operator + repository: "" + version: 1.6.6 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6.6 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6.6 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6.6 + description: Helm chart for Cilium + digest: 94a2c639a35a9f64c0b69e03d616208494c6cf8e8e486a85fc38e060468284a1 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6.6.tgz + version: 1.6.6 + - apiVersion: v1 + appVersion: 1.6.5 + created: "2020-01-23T10:16:22-08:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6.5 + - condition: config.enabled + name: config + repository: "" + version: 1.6.5 + - condition: operator.enabled + name: operator + repository: "" + version: 1.6.5 + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6.5 + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6.5 + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6.5 + description: Helm chart for Cilium + digest: 6184b1591ef808dc4741fce1c97be541118e804293ba7bb4a8aa9c570067cc62 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6.5.tgz + version: 1.6.5 + - apiVersion: v1 + appVersion: 1.6-dev + created: "2020-02-11T15:01:31+01:00" + dependencies: + - condition: agent.enabled + name: agent + repository: "" + version: 1.6-dev + - condition: config.enabled + name: config + repository: "" + version: 1.6-dev + - condition: operator.enabled + name: operator + repository: "" + version: 1.6-dev + - condition: global.etcd.managed + name: managed-etcd + repository: "" + version: 1.6-dev + - condition: preflight.enabled + name: preflight + repository: "" + version: 1.6-dev + - condition: global.nodeinit.enabled + name: nodeinit + repository: "" + version: 1.6-dev + description: Helm chart for Cilium + digest: e6a0373be435d69521c649d8dff23e4a5f1dc7dd3be392d14d2c2d9748907627 + name: cilium + sources: + - https://github.com/cilium/cilium + urls: + - cilium-1.6-dev.tgz + version: 1.6-dev +generated: "2022-01-18T18:00:24.170997677-08:00" diff --git a/vendor/github.com/cilium/charts/prepare_artifacts.sh b/vendor/github.com/cilium/charts/prepare_artifacts.sh new file mode 100644 index 0000000000..5fc2939099 --- /dev/null +++ 
b/vendor/github.com/cilium/charts/prepare_artifacts.sh @@ -0,0 +1,30 @@ +#!/bin/bash + +CWD=$(dirname $(readlink -ne $BASH_SOURCE)) + +set -e + +CILIUM_DIR=$1 +if [ $# -lt 1 ] || [ ! -d $CILIUM_DIR ]; then + echo "usage: $0 <path-to-cilium-repo>" 1>&2 + exit 1 +fi + +if [ ! -e $CILIUM_DIR/install/kubernetes/cilium/values.yaml ]; then + echo "Did you specify a Cilium repository path correctly?" + echo "command: $0 $1" + exit 1 +fi + +CHART_PATH="$CILIUM_DIR/install/kubernetes/cilium/Chart.yaml" +VERSION="$(awk '/version:/ { print $2; exit; } ' $CHART_PATH)" +cd $CILIUM_DIR/install/kubernetes +helm package --destination "$CWD" cilium +cd - +helm repo index . --merge index.yaml +$EDITOR README.md +git add README.md index.yaml cilium-$VERSION.tgz +git commit -s -m "Add $VERSION@$(cd $CILIUM_DIR; git rev-parse HEAD) ⎈" +./fix_dates.sh +git add index.yaml +git commit --amend diff --git a/vendor/github.com/containerd/containerd/LICENSE b/vendor/github.com/containerd/containerd/LICENSE new file mode 100644 index 0000000000..584149b6ee --- /dev/null +++ b/vendor/github.com/containerd/containerd/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof.
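The vendored `prepare_artifacts.sh` above is what produces the `index.yaml` entries earlier in this diff: `helm package` builds the chart tarball and `helm repo index . --merge index.yaml` appends the new record. On the consuming side, the `helm.sh/helm/v3` dependency added to `go.mod` can resolve those records directly. A minimal sketch, assuming a local copy of that `index.yaml` and the upstream import path; the requested chart version is just an example:

```go
package main

import (
	"fmt"

	"helm.sh/helm/v3/pkg/repo"
)

func main() {
	// Load the chart index maintained by `helm repo index . --merge index.yaml`.
	idx, err := repo.LoadIndexFile("index.yaml")
	if err != nil {
		panic(err)
	}

	// Get matches the chart name against a semver constraint.
	cv, err := idx.Get("cilium", "1.7.4")
	if err != nil {
		panic(err)
	}

	// ChartVersion carries the metadata recorded in the index entries above:
	// version, appVersion, digest, and the tarball URL (e.g. cilium-1.7.4.tgz).
	fmt.Println(cv.Version, cv.AppVersion, cv.Digest, cv.URLs)
}
```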
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright The containerd Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/containerd/NOTICE b/vendor/github.com/containerd/containerd/NOTICE new file mode 100644 index 0000000000..8915f02773 --- /dev/null +++ b/vendor/github.com/containerd/containerd/NOTICE @@ -0,0 +1,16 @@ +Docker +Copyright 2012-2015 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/containerd/containerd/archive/compression/compression.go b/vendor/github.com/containerd/containerd/archive/compression/compression.go new file mode 100644 index 0000000000..a883e4d58c --- /dev/null +++ b/vendor/github.com/containerd/containerd/archive/compression/compression.go @@ -0,0 +1,287 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package compression + +import ( + "bufio" + "bytes" + "compress/gzip" + "context" + "fmt" + "io" + "os" + "os/exec" + "strconv" + "sync" + + "github.com/containerd/containerd/log" + "github.com/klauspost/compress/zstd" +) + +type ( + // Compression is the state represents if compressed or not. + Compression int +) + +const ( + // Uncompressed represents the uncompressed. + Uncompressed Compression = iota + // Gzip is gzip compression algorithm. + Gzip + // Zstd is zstd compression algorithm. + Zstd +) + +const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ" + +var ( + initPigz sync.Once + unpigzPath string +) + +var ( + bufioReader32KPool = &sync.Pool{ + New: func() interface{} { return bufio.NewReaderSize(nil, 32*1024) }, + } +) + +// DecompressReadCloser include the stream after decompress and the compress method detected. +type DecompressReadCloser interface { + io.ReadCloser + // GetCompression returns the compress method which is used before decompressing + GetCompression() Compression +} + +type readCloserWrapper struct { + io.Reader + compression Compression + closer func() error +} + +func (r *readCloserWrapper) Close() error { + if r.closer != nil { + return r.closer() + } + return nil +} + +func (r *readCloserWrapper) GetCompression() Compression { + return r.compression +} + +type writeCloserWrapper struct { + io.Writer + closer func() error +} + +func (w *writeCloserWrapper) Close() error { + if w.closer != nil { + w.closer() + } + return nil +} + +type bufferedReader struct { + buf *bufio.Reader +} + +func newBufferedReader(r io.Reader) *bufferedReader { + buf := bufioReader32KPool.Get().(*bufio.Reader) + buf.Reset(r) + return &bufferedReader{buf} +} + +func (r *bufferedReader) Read(p []byte) (n int, err error) { + if r.buf == nil { + return 0, io.EOF + } + n, err = r.buf.Read(p) + if err == io.EOF { + r.buf.Reset(nil) + bufioReader32KPool.Put(r.buf) + r.buf = nil + } + return +} + +func (r *bufferedReader) Peek(n int) ([]byte, error) { + if r.buf == nil { + return nil, io.EOF + } + return r.buf.Peek(n) +} + +// DetectCompression detects the compression algorithm of the source. +func DetectCompression(source []byte) Compression { + for compression, m := range map[Compression][]byte{ + Gzip: {0x1F, 0x8B, 0x08}, + Zstd: {0x28, 0xb5, 0x2f, 0xfd}, + } { + if len(source) < len(m) { + // Len too short + continue + } + if bytes.Equal(m, source[:len(m)]) { + return compression + } + } + return Uncompressed +} + +// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. +func DecompressStream(archive io.Reader) (DecompressReadCloser, error) { + buf := newBufferedReader(archive) + bs, err := buf.Peek(10) + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. 
+ // See Issue docker/docker#18170 + return nil, err + } + + switch compression := DetectCompression(bs); compression { + case Uncompressed: + return &readCloserWrapper{ + Reader: buf, + compression: compression, + }, nil + case Gzip: + ctx, cancel := context.WithCancel(context.Background()) + gzReader, err := gzipDecompress(ctx, buf) + if err != nil { + cancel() + return nil, err + } + + return &readCloserWrapper{ + Reader: gzReader, + compression: compression, + closer: func() error { + cancel() + return gzReader.Close() + }, + }, nil + case Zstd: + zstdReader, err := zstd.NewReader(buf) + if err != nil { + return nil, err + } + return &readCloserWrapper{ + Reader: zstdReader, + compression: compression, + closer: func() error { + zstdReader.Close() + return nil + }, + }, nil + + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// CompressStream compresses the dest with specified compression algorithm. +func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { + switch compression { + case Uncompressed: + return &writeCloserWrapper{dest, nil}, nil + case Gzip: + return gzip.NewWriter(dest), nil + case Zstd: + return zstd.NewWriter(dest) + default: + return nil, fmt.Errorf("unsupported compression format %s", (&compression).Extension()) + } +} + +// Extension returns the extension of a file that uses the specified compression algorithm. +func (compression *Compression) Extension() string { + switch *compression { + case Gzip: + return "gz" + case Zstd: + return "zst" + } + return "" +} + +func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) { + initPigz.Do(func() { + if unpigzPath = detectPigz(); unpigzPath != "" { + log.L.Debug("using pigz for decompression") + } + }) + + if unpigzPath == "" { + return gzip.NewReader(buf) + } + + return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf) +} + +func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) { + reader, writer := io.Pipe() + + cmd.Stdin = in + cmd.Stdout = writer + + var errBuf bytes.Buffer + cmd.Stderr = &errBuf + + if err := cmd.Start(); err != nil { + return nil, err + } + + go func() { + if err := cmd.Wait(); err != nil { + writer.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) + } else { + writer.Close() + } + }() + + return reader, nil +} + +func detectPigz() string { + path, err := exec.LookPath("unpigz") + if err != nil { + log.L.WithError(err).Debug("unpigz not found, falling back to go gzip") + return "" + } + + // Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable + value := os.Getenv(disablePigzEnv) + if value == "" { + return path + } + + disable, err := strconv.ParseBool(value) + if err != nil { + log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value) + return path + } + + if disable { + return "" + } + + return path +} diff --git a/vendor/github.com/containerd/containerd/content/adaptor.go b/vendor/github.com/containerd/containerd/content/adaptor.go new file mode 100644 index 0000000000..88bad2610e --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/adaptor.go @@ -0,0 +1,52 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
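The compression package's read path is driven purely by magic bytes (gzip `1f 8b 08`, zstd `28 b5 2f fd`), so callers never declare the algorithm when decompressing. A minimal round-trip sketch against the API shown above, not part of this diff, using the upstream import path:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/containerd/containerd/archive/compression"
)

func main() {
	// Write gzip-compressed data through CompressStream.
	var buf bytes.Buffer
	w, err := compression.CompressStream(&buf, compression.Gzip)
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello blob")); err != nil {
		panic(err)
	}
	w.Close()

	// DecompressStream sniffs the magic bytes and reports what it found.
	r, err := compression.DecompressStream(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()

	data, _ := ioutil.ReadAll(r)
	fmt.Println(r.GetCompression() == compression.Gzip, string(data)) // true hello blob
}
```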
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "strings" + + "github.com/containerd/containerd/filters" +) + +// AdaptInfo returns `filters.Adaptor` that handles `content.Info`. +func AdaptInfo(info Info) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + + switch fieldpath[0] { + case "digest": + return info.Digest.String(), true + case "size": + // TODO: support size based filtering + case "labels": + return checkMap(fieldpath[1:], info.Labels) + } + + return "", false + }) +} + +func checkMap(fieldpath []string, m map[string]string) (string, bool) { + if len(m) == 0 { + return "", false + } + + value, ok := m[strings.Join(fieldpath, ".")] + return value, ok +} diff --git a/vendor/github.com/containerd/containerd/content/content.go b/vendor/github.com/containerd/containerd/content/content.go new file mode 100644 index 0000000000..ff17a8417b --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/content.go @@ -0,0 +1,182 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + "io" + "time" + + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer +type ReaderAt interface { + io.ReaderAt + io.Closer + Size() int64 +} + +// Provider provides a reader interface for specific content +type Provider interface { + // ReaderAt only requires desc.Digest to be set. + // Other fields in the descriptor may be used internally for resolving + // the location of the actual data. + ReaderAt(ctx context.Context, desc ocispec.Descriptor) (ReaderAt, error) +} + +// Ingester writes content +type Ingester interface { + // Some implementations require WithRef to be included in opts. + Writer(ctx context.Context, opts ...WriterOpt) (Writer, error) +} + +// Info holds content specific information +// +// TODO(stevvooe): Consider a very different name for this struct. Info is way +// to general. It also reads very weird in certain context, like pluralization. +type Info struct { + Digest digest.Digest + Size int64 + CreatedAt time.Time + UpdatedAt time.Time + Labels map[string]string +} + +// Status of a content operation +type Status struct { + Ref string + Offset int64 + Total int64 + Expected digest.Digest + StartedAt time.Time + UpdatedAt time.Time +} + +// WalkFunc defines the callback for a blob walk. 
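`AdaptInfo` above is the glue that lets containerd's generic filter grammar address `content.Info` fields; only `digest` and `labels.*` resolve today (`size` is a TODO). A small sketch of evaluating a parsed filter against it, with upstream import paths assumed:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/filters"
	"github.com/opencontainers/go-digest"
)

func main() {
	info := content.Info{
		Digest: digest.FromString("example blob"),
		Labels: map[string]string{"tier": "cache"},
	}

	// "labels.tier" resolves via checkMap; "digest" via the Digest field.
	f, err := filters.Parse("labels.tier==cache")
	if err != nil {
		panic(err)
	}
	fmt.Println(f.Match(content.AdaptInfo(info))) // true
}
```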
+type WalkFunc func(Info) error + +// Manager provides methods for inspecting, listing and removing content. +type Manager interface { + // Info will return metadata about content available in the content store. + // + // If the content is not present, ErrNotFound will be returned. + Info(ctx context.Context, dgst digest.Digest) (Info, error) + + // Update updates mutable information related to content. + // If one or more fieldpaths are provided, only those + // fields will be updated. + // Mutable fields: + // labels.* + Update(ctx context.Context, info Info, fieldpaths ...string) (Info, error) + + // Walk will call fn for each item in the content store which + // match the provided filters. If no filters are given all + // items will be walked. + Walk(ctx context.Context, fn WalkFunc, filters ...string) error + + // Delete removes the content from the store. + Delete(ctx context.Context, dgst digest.Digest) error +} + +// IngestManager provides methods for managing ingests. +type IngestManager interface { + // Status returns the status of the provided ref. + Status(ctx context.Context, ref string) (Status, error) + + // ListStatuses returns the status of any active ingestions whose ref match the + // provided regular expression. If empty, all active ingestions will be + // returned. + ListStatuses(ctx context.Context, filters ...string) ([]Status, error) + + // Abort completely cancels the ingest operation targeted by ref. + Abort(ctx context.Context, ref string) error +} + +// Writer handles the write of content into a content store +type Writer interface { + // Close closes the writer, if the writer has not been + // committed this allows resuming or aborting. + // Calling Close on a closed writer will not error. + io.WriteCloser + + // Digest may return empty digest or panics until committed. + Digest() digest.Digest + + // Commit commits the blob (but no roll-back is guaranteed on an error). + // size and expected can be zero-value when unknown. + // Commit always closes the writer, even on error. + // ErrAlreadyExists aborts the writer. + Commit(ctx context.Context, size int64, expected digest.Digest, opts ...Opt) error + + // Status returns the current state of write + Status() (Status, error) + + // Truncate updates the size of the target blob + Truncate(size int64) error +} + +// Store combines the methods of content-oriented interfaces into a set that +// are commonly provided by complete implementations. +type Store interface { + Manager + Provider + IngestManager + Ingester +} + +// Opt is used to alter the mutable properties of content +type Opt func(*Info) error + +// WithLabels allows labels to be set on content +func WithLabels(labels map[string]string) Opt { + return func(info *Info) error { + info.Labels = labels + return nil + } +} + +// WriterOpts is internally used by WriterOpt. +type WriterOpts struct { + Ref string + Desc ocispec.Descriptor +} + +// WriterOpt is used for passing options to Ingester.Writer. +type WriterOpt func(*WriterOpts) error + +// WithDescriptor specifies an OCI descriptor. +// Writer may optionally use the descriptor internally for resolving +// the location of the actual data. +// Write does not require any field of desc to be set. +// If the data size is unknown, desc.Size should be set to 0. +// Some implementations may also accept negative values as "unknown". +func WithDescriptor(desc ocispec.Descriptor) WriterOpt { + return func(opts *WriterOpts) error { + opts.Desc = desc + return nil + } +} + +// WithRef specifies a ref string. 
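`WriterOpt` follows the functional-options pattern: implementations fold the options into a `WriterOpts` value before acting on it, exactly as the local store's `Writer` method does further down in this diff. A minimal sketch; the ref string and size here are placeholders:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/content"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	var wOpts content.WriterOpts
	opts := []content.WriterOpt{
		content.WithRef("upload-layer-0"),                   // required by the local store
		content.WithDescriptor(ocispec.Descriptor{Size: 0}), // 0 = size unknown
	}
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			panic(err)
		}
	}
	fmt.Println(wOpts.Ref, wOpts.Desc.Size)
}
```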
+func WithRef(ref string) WriterOpt { + return func(opts *WriterOpts) error { + opts.Ref = ref + return nil + } +} diff --git a/vendor/github.com/containerd/containerd/content/helpers.go b/vendor/github.com/containerd/containerd/content/helpers.go new file mode 100644 index 0000000000..00fae1fc80 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/helpers.go @@ -0,0 +1,287 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package content + +import ( + "context" + "io" + "io/ioutil" + "math/rand" + "sync" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +// NewReader returns a io.Reader from a ReaderAt +func NewReader(ra ReaderAt) io.Reader { + rd := io.NewSectionReader(ra, 0, ra.Size()) + return rd +} + +// ReadBlob retrieves the entire contents of the blob from the provider. +// +// Avoid using this for large blobs, such as layers. +func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) { + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + defer ra.Close() + + p := make([]byte, ra.Size()) + + n, err := ra.ReadAt(p, 0) + if err == io.EOF { + if int64(n) != ra.Size() { + err = io.ErrUnexpectedEOF + } else { + err = nil + } + } + return p, err +} + +// WriteBlob writes data with the expected digest into the content store. If +// expected already exists, the method returns immediately and the reader will +// not be consumed. +// +// This is useful when the digest and size are known beforehand. +// +// Copy is buffered, so no need to wrap reader in buffered io. +func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc ocispec.Descriptor, opts ...Opt) error { + cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc)) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return errors.Wrap(err, "failed to open writer") + } + + return nil // all ready present + } + defer cw.Close() + + return Copy(ctx, cw, r, desc.Size, desc.Digest, opts...) +} + +// OpenWriter opens a new writer for the given reference, retrying if the writer +// is locked until the reference is available or returns an error. +func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, error) { + var ( + cw Writer + err error + retry = 16 + ) + for { + cw, err = cs.Writer(ctx, opts...) + if err != nil { + if !errdefs.IsUnavailable(err) { + return nil, err + } + + // TODO: Check status to determine if the writer is active, + // continue waiting while active, otherwise return lock + // error or abort. 
Requires asserting for an ingest manager + + select { + case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))): + if retry < 2048 { + retry = retry << 1 + } + continue + case <-ctx.Done(): + // Propagate lock error + return nil, err + } + + } + break + } + + return cw, err +} + +// Copy copies data with the expected digest from the reader into the +// provided content store writer. This copy commits the writer. +// +// This is useful when the digest and size are known beforehand. When +// the size or digest is unknown, these values may be empty. +// +// Copy is buffered, so no need to wrap reader in buffered io. +func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error { + ws, err := cw.Status() + if err != nil { + return errors.Wrap(err, "failed to get status") + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, size) + if err != nil { + return errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + } + } + + copied, err := copyWithBuffer(cw, r) + if err != nil { + return errors.Wrap(err, "failed to copy") + } + if size != 0 && copied < size-ws.Offset { + // Short writes would return its own error, this indicates a read failure + return errors.Wrapf(io.ErrUnexpectedEOF, "failed to read expected number of bytes") + } + + if err := cw.Commit(ctx, size, expected, opts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) + } + } + + return nil +} + +// CopyReaderAt copies to a writer from a given reader at for the given +// number of bytes. This copy does not commit the writer. +func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error { + ws, err := cw.Status() + if err != nil { + return err + } + + copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n)) + if err != nil { + return errors.Wrap(err, "failed to copy") + } + if copied < n { + // Short writes would return its own error, this indicates a read failure + return errors.Wrap(io.ErrUnexpectedEOF, "failed to read expected number of bytes") + } + return nil +} + +// CopyReader copies to a writer from a given reader, returning +// the number of bytes copied. +// Note: if the writer has a non-zero offset, the total number +// of bytes read may be greater than those copied if the reader +// is not an io.Seeker. +// This copy does not commit the writer. +func CopyReader(cw Writer, r io.Reader) (int64, error) { + ws, err := cw.Status() + if err != nil { + return 0, errors.Wrap(err, "failed to get status") + } + + if ws.Offset > 0 { + r, err = seekReader(r, ws.Offset, 0) + if err != nil { + return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref) + } + } + + return copyWithBuffer(cw, r) +} + +// seekReader attempts to seek the reader to the given offset, either by +// resolving `io.Seeker`, by detecting `io.ReaderAt`, or discarding +// up to the given offset. +func seekReader(r io.Reader, offset, size int64) (io.Reader, error) { + // attempt to resolve r as a seeker and setup the offset. + seeker, ok := r.(io.Seeker) + if ok { + nn, err := seeker.Seek(offset, io.SeekStart) + if nn != offset { + return nil, errors.Wrapf(err, "failed to seek to offset %v", offset) + } + + if err != nil { + return nil, err + } + + return r, nil + } + + // ok, let's try io.ReaderAt! 
+ readerAt, ok := r.(io.ReaderAt) + if ok && size > offset { + sr := io.NewSectionReader(readerAt, offset, size) + return sr, nil + } + + // well then, let's just discard up to the offset + n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset)) + if err != nil { + return nil, errors.Wrap(err, "failed to discard to offset") + } + if n != offset { + return nil, errors.Errorf("unable to discard to offset") + } + + return r, nil +} + +// copyWithBuffer is very similar to io.CopyBuffer https://golang.org/pkg/io/#CopyBuffer +// but instead of using Read to read from the src, we use ReadAtLeast to make sure we have +// a full buffer before we do a write operation to dst to reduce overheads associated +// with the write operations of small buffers. +func copyWithBuffer(dst io.Writer, src io.Reader) (written int64, err error) { + // If the reader has a WriteTo method, use it to do the copy. + // Avoids an allocation and a copy. + if wt, ok := src.(io.WriterTo); ok { + return wt.WriteTo(dst) + } + // Similarly, if the writer has a ReadFrom method, use it to do the copy. + if rt, ok := dst.(io.ReaderFrom); ok { + return rt.ReadFrom(src) + } + bufRef := bufPool.Get().(*[]byte) + defer bufPool.Put(bufRef) + buf := *bufRef + for { + nr, er := io.ReadAtLeast(src, buf, len(buf)) + if nr > 0 { + nw, ew := dst.Write(buf[0:nr]) + if nw > 0 { + written += int64(nw) + } + if ew != nil { + err = ew + break + } + if nr != nw { + err = io.ErrShortWrite + break + } + } + if er != nil { + // If an EOF happens after reading fewer than the requested bytes, + // ReadAtLeast returns ErrUnexpectedEOF. + if er != io.EOF && er != io.ErrUnexpectedEOF { + err = er + } + break + } + } + return +} diff --git a/vendor/github.com/containerd/containerd/content/local/locks.go b/vendor/github.com/containerd/containerd/content/local/locks.go new file mode 100644 index 0000000000..d1d2d564df --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/locks.go @@ -0,0 +1,56 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package local + +import ( + "sync" + "time" + + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" +) + +// Handles locking references + +type lock struct { + since time.Time +} + +var ( + // locks lets us lock in process + locks = make(map[string]*lock) + locksMu sync.Mutex +) + +func tryLock(ref string) error { + locksMu.Lock() + defer locksMu.Unlock() + + if v, ok := locks[ref]; ok { + return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since) + } + + locks[ref] = &lock{time.Now()} + return nil +} + +func unlock(ref string) { + locksMu.Lock() + defer locksMu.Unlock() + + delete(locks, ref) +} diff --git a/vendor/github.com/containerd/containerd/content/local/readerat.go b/vendor/github.com/containerd/containerd/content/local/readerat.go new file mode 100644 index 0000000000..5d3ae03903 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/readerat.go @@ -0,0 +1,68 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + + "github.com/pkg/errors" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" +) + +// readerat implements io.ReaderAt in a completely stateless manner by opening +// the referenced file for each call to ReadAt. +type sizeReaderAt struct { + size int64 + fp *os.File +} + +// OpenReader creates ReaderAt from a file +func OpenReader(p string) (content.ReaderAt, error) { + fi, err := os.Stat(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") + } + + fp, err := os.Open(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found") + } + + return sizeReaderAt{size: fi.Size(), fp: fp}, nil +} + +func (ra sizeReaderAt) ReadAt(p []byte, offset int64) (int, error) { + return ra.fp.ReadAt(p, offset) +} + +func (ra sizeReaderAt) Size() int64 { + return ra.size +} + +func (ra sizeReaderAt) Close() error { + return ra.fp.Close() +} diff --git a/vendor/github.com/containerd/containerd/content/local/store.go b/vendor/github.com/containerd/containerd/content/local/store.go new file mode 100644 index 0000000000..314d913673 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store.go @@ -0,0 +1,701 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "math/rand" + "os" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/filters" + "github.com/containerd/containerd/log" + "github.com/sirupsen/logrus" + + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var bufPool = sync.Pool{ + New: func() interface{} { + buffer := make([]byte, 1<<20) + return &buffer + }, +} + +// LabelStore is used to store mutable labels for digests +type LabelStore interface { + // Get returns all the labels for the given digest + Get(digest.Digest) (map[string]string, error) + + // Set sets all the labels for a given digest + Set(digest.Digest, map[string]string) error + + // Update replaces the given labels for a digest, + // a key with an empty value removes a label. + Update(digest.Digest, map[string]string) (map[string]string, error) +} + +// Store is digest-keyed store for content. All data written into the store is +// stored under a verifiable digest. +// +// Store can generally support multi-reader, single-writer ingest of data, +// including resumable ingest. +type store struct { + root string + ls LabelStore +} + +// NewStore returns a local content store +func NewStore(root string) (content.Store, error) { + return NewLabeledStore(root, nil) +} + +// NewLabeledStore returns a new content store using the provided label store +// +// Note: content stores which are used underneath a metadata store may not +// require labels and should use `NewStore`. `NewLabeledStore` is primarily +// useful for tests or standalone implementations. +func NewLabeledStore(root string, ls LabelStore) (content.Store, error) { + if err := os.MkdirAll(filepath.Join(root, "ingest"), 0777); err != nil { + return nil, err + } + + return &store{ + root: root, + ls: ls, + }, nil +} + +func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) { + p, err := s.blobPath(dgst) + if err != nil { + return content.Info{}, errors.Wrapf(err, "calculating blob info path") + } + + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) + } + + return content.Info{}, err + } + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return content.Info{}, err + } + } + return s.info(dgst, fi, labels), nil +} + +func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]string) content.Info { + return content.Info{ + Digest: dgst, + Size: fi.Size(), + CreatedAt: fi.ModTime(), + UpdatedAt: getATime(fi), + Labels: labels, + } +} + +// ReaderAt returns an io.ReaderAt for the blob. +func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { + p, err := s.blobPath(desc.Digest) + if err != nil { + return nil, errors.Wrapf(err, "calculating blob path for ReaderAt") + } + + reader, err := OpenReader(p) + if err != nil { + return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p) + } + + return reader, nil +} + +// Delete removes a blob by its digest. 
+// +// While this is safe to do concurrently, safe exist-removal logic must hold +// some global lock on the store. +func (s *store) Delete(ctx context.Context, dgst digest.Digest) error { + bp, err := s.blobPath(dgst) + if err != nil { + return errors.Wrapf(err, "calculating blob path for delete") + } + + if err := os.RemoveAll(bp); err != nil { + if !os.IsNotExist(err) { + return err + } + + return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) + } + + return nil +} + +func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) { + if s.ls == nil { + return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store") + } + + p, err := s.blobPath(info.Digest) + if err != nil { + return content.Info{}, errors.Wrapf(err, "calculating blob path for update") + } + + fi, err := os.Stat(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest) + } + + return content.Info{}, err + } + + var ( + all bool + labels map[string]string + ) + if len(fieldpaths) > 0 { + for _, path := range fieldpaths { + if strings.HasPrefix(path, "labels.") { + if labels == nil { + labels = map[string]string{} + } + + key := strings.TrimPrefix(path, "labels.") + labels[key] = info.Labels[key] + continue + } + + switch path { + case "labels": + all = true + labels = info.Labels + default: + return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest) + } + } + } else { + all = true + labels = info.Labels + } + + if all { + err = s.ls.Set(info.Digest, labels) + } else { + labels, err = s.ls.Update(info.Digest, labels) + } + if err != nil { + return content.Info{}, err + } + + info = s.info(info.Digest, fi, labels) + info.UpdatedAt = time.Now() + + if err := os.Chtimes(p, info.UpdatedAt, info.CreatedAt); err != nil { + log.G(ctx).WithError(err).Warnf("could not change access time for %s", info.Digest) + } + + return info, nil +} + +func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) error { + root := filepath.Join(s.root, "blobs") + + filter, err := filters.ParseAll(fs...) + if err != nil { + return err + } + + var alg digest.Algorithm + return filepath.Walk(root, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return err + } + if !fi.IsDir() && !alg.Available() { + return nil + } + + // TODO(stevvooe): There are few more cases with subdirs that should be + // handled in case the layout gets corrupted. This isn't strict enough + // and may spew bad data. + + if path == root { + return nil + } + if filepath.Dir(path) == root { + alg = digest.Algorithm(filepath.Base(path)) + + if !alg.Available() { + alg = "" + return filepath.SkipDir + } + + // descending into a hash directory + return nil + } + + dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path)) + if err := dgst.Validate(); err != nil { + // log error but don't report + log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path") + // if we see this, it could mean some sort of corruption of the + // store or extra paths not expected previously. 
+ } + + var labels map[string]string + if s.ls != nil { + labels, err = s.ls.Get(dgst) + if err != nil { + return err + } + } + + info := s.info(dgst, fi, labels) + if !filter.Match(content.AdaptInfo(info)) { + return nil + } + return fn(info) + }) +} + +func (s *store) Status(ctx context.Context, ref string) (content.Status, error) { + return s.status(s.ingestRoot(ref)) +} + +func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Status, error) { + fp, err := os.Open(filepath.Join(s.root, "ingest")) + if err != nil { + return nil, err + } + + defer fp.Close() + + fis, err := fp.Readdir(-1) + if err != nil { + return nil, err + } + + filter, err := filters.ParseAll(fs...) + if err != nil { + return nil, err + } + + var active []content.Status + for _, fi := range fis { + p := filepath.Join(s.root, "ingest", fi.Name()) + stat, err := s.status(p) + if err != nil { + if !os.IsNotExist(err) { + return nil, err + } + + // TODO(stevvooe): This is a common error if uploads are being + // completed while making this listing. Need to consider taking a + // lock on the whole store to coordinate this aspect. + // + // Another option is to cleanup downloads asynchronously and + // coordinate this method with the cleanup process. + // + // For now, we just skip them, as they really don't exist. + continue + } + + if filter.Match(adaptStatus(stat)) { + active = append(active, stat) + } + } + + return active, nil +} + +// WalkStatusRefs is used to walk all status references +// Failed status reads will be logged and ignored, if +// this function is called while references are being altered, +// these error messages may be produced. +func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error { + fp, err := os.Open(filepath.Join(s.root, "ingest")) + if err != nil { + return err + } + + defer fp.Close() + + fis, err := fp.Readdir(-1) + if err != nil { + return err + } + + for _, fi := range fis { + rf := filepath.Join(s.root, "ingest", fi.Name(), "ref") + + ref, err := readFileString(rf) + if err != nil { + log.G(ctx).WithError(err).WithField("path", rf).Error("failed to read ingest ref") + continue + } + + if err := fn(ref); err != nil { + return err + } + } + + return nil +} + +// status works like stat above except uses the path to the ingest. +func (s *store) status(ingestPath string) (content.Status, error) { + dp := filepath.Join(ingestPath, "data") + fi, err := os.Stat(dp) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrap(errdefs.ErrNotFound, err.Error()) + } + return content.Status{}, err + } + + ref, err := readFileString(filepath.Join(ingestPath, "ref")) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrap(errdefs.ErrNotFound, err.Error()) + } + return content.Status{}, err + } + + startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat")) + if err != nil { + return content.Status{}, errors.Wrapf(err, "could not read startedat") + } + + updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat")) + if err != nil { + return content.Status{}, errors.Wrapf(err, "could not read updatedat") + } + + // because we don't write updatedat on every write, the mod time may + // actually be more up to date. 
+ if fi.ModTime().After(updatedAt) { + updatedAt = fi.ModTime() + } + + return content.Status{ + Ref: ref, + Offset: fi.Size(), + Total: s.total(ingestPath), + UpdatedAt: updatedAt, + StartedAt: startedAt, + }, nil +} + +func adaptStatus(status content.Status) filters.Adaptor { + return filters.AdapterFunc(func(fieldpath []string) (string, bool) { + if len(fieldpath) == 0 { + return "", false + } + switch fieldpath[0] { + case "ref": + return status.Ref, true + } + + return "", false + }) +} + +// total attempts to resolve the total expected size for the write. +func (s *store) total(ingestPath string) int64 { + totalS, err := readFileString(filepath.Join(ingestPath, "total")) + if err != nil { + return 0 + } + + total, err := strconv.ParseInt(totalS, 10, 64) + if err != nil { + // represents a corrupted file, should probably remove. + return 0 + } + + return total +} + +// Writer begins or resumes the active writer identified by ref. If the writer +// is already in use, an error is returned. Only one writer may be in use per +// ref at a time. +// +// The argument `ref` is used to uniquely identify a long-lived writer transaction. +func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + // TODO(AkihiroSuda): we could create a random string or one calculated based on the context + // https://github.com/containerd/containerd/issues/2129#issuecomment-380255019 + if wOpts.Ref == "" { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") + } + var lockErr error + for count := uint64(0); count < 10; count++ { + if err := tryLock(wOpts.Ref); err != nil { + if !errdefs.IsUnavailable(err) { + return nil, err + } + + lockErr = err + } else { + lockErr = nil + break + } + time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count))) + } + + if lockErr != nil { + return nil, lockErr + } + + w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest) + if err != nil { + unlock(wOpts.Ref) + return nil, err + } + + return w, nil // lock is now held by w. +} + +func (s *store) resumeStatus(ref string, total int64, digester digest.Digester) (content.Status, error) { + path, _, data := s.ingestPaths(ref) + status, err := s.status(path) + if err != nil { + return status, errors.Wrap(err, "failed reading status of resume write") + } + if ref != status.Ref { + // NOTE(stevvooe): This is fairly catastrophic. Either we have some + // layout corruption or a hash collision for the ref key. + return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref) + } + + if total > 0 && status.Total > 0 && total != status.Total { + return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total) + } + + // TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes + fp, err := os.Open(data) + if err != nil { + return status, err + } + + p := bufPool.Get().(*[]byte) + status.Offset, err = io.CopyBuffer(digester.Hash(), fp, *p) + bufPool.Put(p) + fp.Close() + return status, err +} + +// writer provides the main implementation of the Writer method. The caller +// must hold the lock correctly and release on error if there is a problem. +func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { + // TODO(stevvooe): Need to actually store expected here. We have + // code in the service that shouldn't be dealing with this. + if expected != "" { + p, err := s.blobPath(expected) + if err != nil { + return nil, errors.Wrap(err, "calculating expected blob path for writer") + } + if _, err := os.Stat(p); err == nil { + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected) + } + } + + path, refp, data := s.ingestPaths(ref) + + var ( + digester = digest.Canonical.Digester() + offset int64 + startedAt time.Time + updatedAt time.Time + ) + + foundValidIngest := false + // ensure that the ingest path has been created.
+// writer provides the main implementation of the Writer method. The caller +// must hold the lock correctly and release on error if there is a problem. +func (s *store) writer(ctx context.Context, ref string, total int64, expected digest.Digest) (content.Writer, error) { + // TODO(stevvooe): Need to actually store expected here. We have + // code in the service that shouldn't be dealing with this. + if expected != "" { + p, err := s.blobPath(expected) + if err != nil { + return nil, errors.Wrap(err, "calculating expected blob path for writer") + } + if _, err := os.Stat(p); err == nil { + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected) + } + } + + path, refp, data := s.ingestPaths(ref) + + var ( + digester = digest.Canonical.Digester() + offset int64 + startedAt time.Time + updatedAt time.Time + ) + + foundValidIngest := false + // ensure that the ingest path has been created. + if err := os.Mkdir(path, 0755); err != nil { + if !os.IsExist(err) { + return nil, err + } + status, err := s.resumeStatus(ref, total, digester) + if err == nil { + foundValidIngest = true + updatedAt = status.UpdatedAt + startedAt = status.StartedAt + total = status.Total + offset = status.Offset + } else { + logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error()) + } + } + + if !foundValidIngest { + startedAt = time.Now() + updatedAt = startedAt + + // the ingest is new, we need to setup the target location. + // write the ref to a file for later use + if err := ioutil.WriteFile(refp, []byte(ref), 0666); err != nil { + return nil, err + } + + if err := writeTimestampFile(filepath.Join(path, "startedat"), startedAt); err != nil { + return nil, err + } + + if err := writeTimestampFile(filepath.Join(path, "updatedat"), startedAt); err != nil { + return nil, err + } + + if total > 0 { + if err := ioutil.WriteFile(filepath.Join(path, "total"), []byte(fmt.Sprint(total)), 0666); err != nil { + return nil, err + } + } + } + + fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666) + if err != nil { + return nil, errors.Wrap(err, "failed to open data file") + } + + if _, err := fp.Seek(offset, io.SeekStart); err != nil { + return nil, errors.Wrap(err, "could not seek to current write offset") + } + + return &writer{ + s: s, + fp: fp, + ref: ref, + path: path, + offset: offset, + total: total, + digester: digester, + startedAt: startedAt, + updatedAt: updatedAt, + }, nil +} + +// Abort an active transaction keyed by ref. If the ingest is active, it will +// be cancelled. Any resources associated with the ingest will be cleaned. +func (s *store) Abort(ctx context.Context, ref string) error { + root := s.ingestRoot(ref) + if err := os.RemoveAll(root); err != nil { + if os.IsNotExist(err) { + return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref) + } + + return err + } + + return nil +} + +func (s *store) blobPath(dgst digest.Digest) (string, error) { + if err := dgst.Validate(); err != nil { + return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err) + } + + return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil +} + +func (s *store) ingestRoot(ref string) string { + // we take a digest of the ref to keep the ingest paths constant length. + // Note that this is not the current or potential digest of incoming content. + dgst := digest.FromString(ref) + return filepath.Join(s.root, "ingest", dgst.Hex()) +} + +// ingestPaths returns the paths of the ingest for the given ref. The paths are the following: +// +// - root: entire ingest directory +// - ref: name of the starting ref, must be unique +// - data: file where data is written +// +func (s *store) ingestPaths(ref string) (string, string, string) { + var ( + fp = s.ingestRoot(ref) + rp = filepath.Join(fp, "ref") + dp = filepath.Join(fp, "data") + ) + + return fp, rp, dp +} + +func readFileString(path string) (string, error) { + p, err := ioutil.ReadFile(path) + return string(p), err +}
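ingestRoot hashes the ref so ingest directory names have constant length, while blobPath addresses committed content purely by digest. A sketch of the resulting on-disk layout, built with the go-digest package directly; the root path is illustrative:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

func main() {
	root := "/var/lib/content" // illustrative store root

	// Ingest side: the ref is hashed, so directory names have constant
	// length and arbitrary refs cannot traverse outside the ingest tree.
	ref := "example-upload"
	ingest := filepath.Join(root, "ingest", digest.FromString(ref).Hex())
	fmt.Println(ingest)                        // ingest root for this ref
	fmt.Println(filepath.Join(ingest, "ref"))  // holds the ref string
	fmt.Println(filepath.Join(ingest, "data")) // bytes written so far

	// Blob side: committed content is addressed by its own digest.
	dgst := digest.FromString("hello world")
	fmt.Println(filepath.Join(root, "blobs", dgst.Algorithm().String(), dgst.Hex()))
}
```

+// readFileTimestamp reads a file with just a timestamp present.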
+func readFileTimestamp(p string) (time.Time, error) { + b, err := ioutil.ReadFile(p) + if err != nil { + if os.IsNotExist(err) { + err = errors.Wrap(errdefs.ErrNotFound, err.Error()) + } + return time.Time{}, err + } + + var t time.Time + if err := t.UnmarshalText(b); err != nil { + return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p) + } + + return t, nil +} + +func writeTimestampFile(p string, t time.Time) error { + b, err := t.MarshalText() + if err != nil { + return err + } + return atomicWrite(p, b, 0666) +} + +func atomicWrite(path string, data []byte, mode os.FileMode) error { + tmp := fmt.Sprintf("%s.tmp", path) + f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode) + if err != nil { + return errors.Wrap(err, "create tmp file") + } + _, err = f.Write(data) + f.Close() + if err != nil { + return errors.Wrap(err, "write atomic data") + } + return os.Rename(tmp, path) +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_bsd.go b/vendor/github.com/containerd/containerd/content/local/store_bsd.go new file mode 100644 index 0000000000..da149a2fda --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_bsd.go @@ -0,0 +1,33 @@ +// +build darwin freebsd netbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. + } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_openbsd.go b/vendor/github.com/containerd/containerd/content/local/store_openbsd.go new file mode 100644 index 0000000000..f34f0dad2b --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_openbsd.go @@ -0,0 +1,33 @@ +// +build openbsd + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. 
+ } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_unix.go b/vendor/github.com/containerd/containerd/content/local/store_unix.go new file mode 100644 index 0000000000..69a74bab0e --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_unix.go @@ -0,0 +1,33 @@ +// +build linux solaris + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "syscall" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + if st, ok := fi.Sys().(*syscall.Stat_t); ok { + return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well. + } + + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/store_windows.go b/vendor/github.com/containerd/containerd/content/local/store_windows.go new file mode 100644 index 0000000000..bce8499790 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/store_windows.go @@ -0,0 +1,26 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package local + +import ( + "os" + "time" +) + +func getATime(fi os.FileInfo) time.Time { + return fi.ModTime() +} diff --git a/vendor/github.com/containerd/containerd/content/local/writer.go b/vendor/github.com/containerd/containerd/content/local/writer.go new file mode 100644 index 0000000000..0a11f4d912 --- /dev/null +++ b/vendor/github.com/containerd/containerd/content/local/writer.go @@ -0,0 +1,207 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package local + +import ( + "context" + "io" + "os" + "path/filepath" + "runtime" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +// writer represents a write transaction against the blob store. +type writer struct { + s *store + fp *os.File // opened data file + path string // path to writer dir + ref string // ref key + offset int64 + total int64 + digester digest.Digester + startedAt time.Time + updatedAt time.Time +} + +func (w *writer) Status() (content.Status, error) { + return content.Status{ + Ref: w.ref, + Offset: w.offset, + Total: w.total, + StartedAt: w.startedAt, + UpdatedAt: w.updatedAt, + }, nil +} + +// Digest returns the current digest of the content, up to the current write. +// +// Cannot be called concurrently with `Write`. +func (w *writer) Digest() digest.Digest { + return w.digester.Digest() +} + +// Write p to the transaction. +// +// Note that writes are unbuffered to the backing file. When writing, it is +// recommended to wrap in a bufio.Writer or, preferably, use io.CopyBuffer. +func (w *writer) Write(p []byte) (n int, err error) { + n, err = w.fp.Write(p) + w.digester.Hash().Write(p[:n]) + w.offset += int64(len(p)) + w.updatedAt = time.Now() + return n, err +}
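Together, Write and Commit implement the two phases of the transaction: bytes stream into the ingest file while the digester runs incrementally, and Commit later verifies size and digest before renaming the data into the blob tree. A hedged usage sketch against the public content API; the ref and store root are illustrative:

```go
package main

import (
	"context"
	"log"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	"github.com/opencontainers/go-digest"
)

func main() {
	ctx := context.Background()
	cs, err := local.NewStore("/tmp/content")
	if err != nil {
		log.Fatal(err)
	}

	data := []byte("hello world")
	expected := digest.FromBytes(data)

	// The ref keys the transaction; reopening a writer with the same ref
	// resumes an interrupted upload at its previous offset.
	w, err := cs.Writer(ctx, content.WithRef("example-upload"))
	if err != nil {
		log.Fatal(err)
	}
	if _, err := w.Write(data); err != nil {
		log.Fatal(err)
	}
	// Commit checks the size and digest, then renames the ingest data file
	// into blobs/<algorithm>/<hex>.
	if err := w.Commit(ctx, int64(len(data)), expected); err != nil {
		log.Fatal(err)
	}
}
```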
+func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + // Ensure even on error the writer is fully closed + defer unlock(w.ref) + + var base content.Info + for _, opt := range opts { + if err := opt(&base); err != nil { + return err + } + } + + fp := w.fp + w.fp = nil + + if fp == nil { + return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer") + } + + if err := fp.Sync(); err != nil { + fp.Close() + return errors.Wrap(err, "sync failed") + } + + fi, err := fp.Stat() + closeErr := fp.Close() + if err != nil { + return errors.Wrap(err, "stat on ingest file failed") + } + if closeErr != nil { + return errors.Wrap(closeErr, "failed to close ingest file") + } + + if size > 0 && size != fi.Size() { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size) + } + + dgst := w.digester.Digest() + if expected != "" && expected != dgst { + return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected) + } + + var ( + ingest = filepath.Join(w.path, "data") + target, _ = w.s.blobPath(dgst) // ignore error because we calculated this dgst + ) + + // make sure parent directories of blob exist + if err := os.MkdirAll(filepath.Dir(target), 0755); err != nil { + return err + } + + if _, err := os.Stat(target); err == nil { + // collision with the target file! + if err := os.RemoveAll(w.path); err != nil { + log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") + } + return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst) + } + + if err := os.Rename(ingest, target); err != nil { + return err + } + + // Ingest has now been made available in the content store, attempt to complete + // setting metadata but errors should only be logged and not returned since + // the content store cannot be cleanly rolled back. + + commitTime := time.Now() + if err := os.Chtimes(target, commitTime, commitTime); err != nil { + log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time") + } + + // clean up!! + if err := os.RemoveAll(w.path); err != nil { + log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory") + } + + if w.s.ls != nil && base.Labels != nil { + if err := w.s.ls.Set(dgst, base.Labels); err != nil { + log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels") + } + } + + // change to readonly, more important for read, but provides _some_ + // protection from this point on. We use the existing perms with a mask + // only allowing reads honoring the umask on creation. + // + // This removes write and exec, only allowing read per the creation umask. + // + // NOTE: Windows does not support this operation + if runtime.GOOS != "windows" { + if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil { + log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly") + } + } + + return nil +} + +// Close the writer, flushing any unwritten data and leaving the progress +// intact. +// +// If one needs to resume the transaction, a new writer can be obtained from +// `Ingester.Writer` using the same key. The write can then be continued +// from where it was left off. +// +// To abandon a transaction completely, first call close then `IngestManager.Abort` to +// clean up the associated resources. +func (w *writer) Close() (err error) { + if w.fp != nil { + w.fp.Sync() + err = w.fp.Close() + writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt) + w.fp = nil + unlock(w.ref) + return + } + + return nil +} + +func (w *writer) Truncate(size int64) error { + if size != 0 { + return errors.New("Truncate: unsupported size") + } + w.offset = 0 + w.digester.Hash().Reset() + if _, err := w.fp.Seek(0, io.SeekStart); err != nil { + return err + } + return w.fp.Truncate(0) +}
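Commit reports a blob collision as errdefs.ErrAlreadyExists. Because the store is content-addressed, callers usually treat that as success; a small sketch of the pattern, using the errdefs helpers vendored next (commitTolerant is an illustrative name):

```go
package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/opencontainers/go-digest"
)

// commitTolerant commits a writer but treats a digest collision as success:
// if the blob already exists, the content is present either way.
func commitTolerant(ctx context.Context, w content.Writer, size int64, expected digest.Digest) error {
	if err := w.Commit(ctx, size, expected); err != nil && !errdefs.IsAlreadyExists(err) {
		return err
	}
	return nil
}
```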
diff --git a/vendor/github.com/containerd/containerd/errdefs/errors.go b/vendor/github.com/containerd/containerd/errdefs/errors.go new file mode 100644 index 0000000000..05a35228ca --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/errors.go @@ -0,0 +1,93 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package errdefs defines the common errors used throughout containerd +// packages. +// +// Use with errors.Wrap and error.Wrapf to add context to an error. +// +// To detect an error class, use the IsXXX functions to tell whether an error +// is of a certain type. +// +// The functions ToGRPC and FromGRPC can be used to map server-side and +// client-side errors to the correct types. +package errdefs + +import ( + "context" + + "github.com/pkg/errors" +) + +// Definitions of common error types used throughout containerd. All containerd +// errors returned by most packages will map into one of these error classes. +// Packages should return errors of these types when they want to instruct a +// client to take a particular action. +// +// For the most part, we just try to provide local grpc errors. Most conditions +// map very well to those defined by grpc. +var ( + ErrUnknown = errors.New("unknown") // used internally to represent a missed mapping. + ErrInvalidArgument = errors.New("invalid argument") + ErrNotFound = errors.New("not found") + ErrAlreadyExists = errors.New("already exists") + ErrFailedPrecondition = errors.New("failed precondition") + ErrUnavailable = errors.New("unavailable") + ErrNotImplemented = errors.New("not implemented") // represents not supported and unimplemented +) + +// IsInvalidArgument returns true if the error is due to an invalid argument +func IsInvalidArgument(err error) bool { + return errors.Is(err, ErrInvalidArgument) +} + +// IsNotFound returns true if the error is due to a missing object +func IsNotFound(err error) bool { + return errors.Is(err, ErrNotFound) +} + +// IsAlreadyExists returns true if the error is due to an already existing +// metadata item +func IsAlreadyExists(err error) bool { + return errors.Is(err, ErrAlreadyExists) +} + +// IsFailedPrecondition returns true if an operation could not proceed due to +// the lack of a particular condition +func IsFailedPrecondition(err error) bool { + return errors.Is(err, ErrFailedPrecondition) +} + +// IsUnavailable returns true if the error is due to a resource being unavailable +func IsUnavailable(err error) bool { + return errors.Is(err, ErrUnavailable) +} + +// IsNotImplemented returns true if the error is due to not being implemented +func IsNotImplemented(err error) bool { + return errors.Is(err, ErrNotImplemented) +} + +// IsCanceled returns true if the error is due to `context.Canceled`. +func IsCanceled(err error) bool { + return errors.Is(err, context.Canceled) +} + +// IsDeadlineExceeded returns true if the error is due to +// `context.DeadlineExceeded`. +func IsDeadlineExceeded(err error) bool { + return errors.Is(err, context.DeadlineExceeded) +} diff --git a/vendor/github.com/containerd/containerd/errdefs/grpc.go b/vendor/github.com/containerd/containerd/errdefs/grpc.go new file mode 100644 index 0000000000..209f63bd0f --- /dev/null +++ b/vendor/github.com/containerd/containerd/errdefs/grpc.go @@ -0,0 +1,147 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errdefs + +import ( + "context" + "strings" + + "github.com/pkg/errors" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ToGRPC will attempt to map the backend containerd error into a grpc error, +// using the original error message as a description. +// +// Further information may be extracted from certain errors depending on their +// type. +// +// If the error is unmapped, the original error will be returned to be handled +// by the regular grpc error handling stack.
+func ToGRPC(err error) error { + if err == nil { + return nil + } + + if isGRPCError(err) { + // error has already been mapped to grpc + return err + } + + switch { + case IsInvalidArgument(err): + return status.Errorf(codes.InvalidArgument, err.Error()) + case IsNotFound(err): + return status.Errorf(codes.NotFound, err.Error()) + case IsAlreadyExists(err): + return status.Errorf(codes.AlreadyExists, err.Error()) + case IsFailedPrecondition(err): + return status.Errorf(codes.FailedPrecondition, err.Error()) + case IsUnavailable(err): + return status.Errorf(codes.Unavailable, err.Error()) + case IsNotImplemented(err): + return status.Errorf(codes.Unimplemented, err.Error()) + case IsCanceled(err): + return status.Errorf(codes.Canceled, err.Error()) + case IsDeadlineExceeded(err): + return status.Errorf(codes.DeadlineExceeded, err.Error()) + } + + return err +} + +// ToGRPCf maps the error to grpc error codes, assembling the formatting string +// and combining it with the target error string. +// +// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...)) +func ToGRPCf(err error, format string, args ...interface{}) error { + return ToGRPC(errors.Wrapf(err, format, args...)) +} + +// FromGRPC returns the underlying error from a grpc service based on the grpc error code +func FromGRPC(err error) error { + if err == nil { + return nil + } + + var cls error // divide these into error classes, becomes the cause + + switch code(err) { + case codes.InvalidArgument: + cls = ErrInvalidArgument + case codes.AlreadyExists: + cls = ErrAlreadyExists + case codes.NotFound: + cls = ErrNotFound + case codes.Unavailable: + cls = ErrUnavailable + case codes.FailedPrecondition: + cls = ErrFailedPrecondition + case codes.Unimplemented: + cls = ErrNotImplemented + case codes.Canceled: + cls = context.Canceled + case codes.DeadlineExceeded: + cls = context.DeadlineExceeded + default: + cls = ErrUnknown + } + + msg := rebaseMessage(cls, err) + if msg != "" { + err = errors.Wrap(cls, msg) + } else { + err = errors.WithStack(cls) + } + + return err +} + +// rebaseMessage removes the repeats for an error at the end of an error +// string. This will happen when taking an error over grpc then remapping it. +// +// Effectively, we just remove the string of cls from the end of err if it +// appears there. +func rebaseMessage(cls error, err error) string { + desc := errDesc(err) + clss := cls.Error() + if desc == clss { + return "" + } + + return strings.TrimSuffix(desc, ": "+clss) +} + +func isGRPCError(err error) bool { + _, ok := status.FromError(err) + return ok +} + +func code(err error) codes.Code { + if s, ok := status.FromError(err); ok { + return s.Code() + } + return codes.Unknown +} + +func errDesc(err error) string { + if s, ok := status.FromError(err); ok { + return s.Message() + } + return err.Error() +} diff --git a/vendor/github.com/containerd/containerd/filters/adaptor.go b/vendor/github.com/containerd/containerd/filters/adaptor.go new file mode 100644 index 0000000000..5a9c559c1e --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/adaptor.go @@ -0,0 +1,33 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +// Adaptor specifies the mapping of fieldpaths to a type. For the given field +// path, the value and whether it is present should be returned. The mapping of +// the fieldpath to a field is deferred to the adaptor implementation, but +// should generally follow protobuf field path/mask semantics. +type Adaptor interface { + Field(fieldpath []string) (value string, present bool) +} + +// AdapterFunc allows implementation specific matching of fieldpaths +type AdapterFunc func(fieldpath []string) (string, bool) + +// Field returns the field value and true if it exists +func (fn AdapterFunc) Field(fieldpath []string) (string, bool) { + return fn(fieldpath) +}
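An Adaptor is the bridge between a concrete type and the filter syntax. A minimal sketch adapting a hypothetical struct so that the filters and parser vendored just after can match it; item and adapt are illustrative names:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

// item is a hypothetical type we want to filter on.
type item struct {
	Name   string
	Labels map[string]string
}

// adapt exposes the "name" and "labels.<key>" fieldpaths of an item.
func adapt(it item) filters.Adaptor {
	return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
		if len(fieldpath) == 0 {
			return "", false
		}
		switch fieldpath[0] {
		case "name":
			return it.Name, len(it.Name) > 0
		case "labels":
			if len(fieldpath) < 2 {
				return "", false
			}
			v, ok := it.Labels[fieldpath[1]]
			return v, ok
		}
		return "", false
	})
}

func main() {
	f, err := filters.Parse("name==foo,labels.env==prod")
	if err != nil {
		panic(err)
	}
	it := item{Name: "foo", Labels: map[string]string{"env": "prod"}}
	fmt.Println(f.Match(adapt(it))) // true
}
```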
diff --git a/vendor/github.com/containerd/containerd/filters/filter.go b/vendor/github.com/containerd/containerd/filters/filter.go new file mode 100644 index 0000000000..cf09d8d9e4 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/filter.go @@ -0,0 +1,179 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package filters defines a syntax and parser that can be used for the +// filtration of items across the containerd API. The core is built on the +// concept of protobuf field paths, with quoting. Several operators allow the +// user to flexibly select items based on field presence, equality, inequality +// and regular expressions. Flexible adaptors support working with any type. +// +// The syntax is fairly familiar, if you've used container ecosystem +// projects. At the core, we base it on the concept of protobuf field +// paths, augmenting with the ability to quote portions of the field path +// to match arbitrary labels. These "selectors" come in the following +// syntax: +// +// ``` +// <fieldpath>[<operator><value>] +// ``` +// +// A basic example is as follows: +// +// ``` +// name==foo +// ``` +// +// This would match all objects that have a field `name` with the value +// `foo`. If we only want to test if the field is present, we can omit the +// operator. This is most useful for matching labels in containerd. The +// following will match objects that have the field "labels" and have the +// label "foo" defined: +// +// ``` +// labels.foo +// ``` +// +// We also allow for quoting of parts of the field path to allow matching +// of arbitrary items: +// +// ``` +// labels."very complex label"==something +// ``` +// +// We also define `!=` and `~=` as operators. The `!=` will match all +// objects that don't match the value for a field and `~=` will compile the +// target value as a regular expression and match the field value against that. +// +// Selectors can be combined using a comma, such that the resulting +// selector will require all selectors are matched for the object to match. +// The following example will match objects that are named `foo` and have +// the label `bar`: +// +// ``` +// name==foo,labels.bar +// ``` +// +package filters + +import ( + "regexp" + + "github.com/containerd/containerd/log" +) + +// Filter matches specific resources based on the provided filter +type Filter interface { + Match(adaptor Adaptor) bool +} + +// FilterFunc is a function that handles matching with an adaptor +type FilterFunc func(Adaptor) bool + +// Match matches the FilterFunc returning true if the object matches the filter +func (fn FilterFunc) Match(adaptor Adaptor) bool { + return fn(adaptor) +} + +// Always is a filter that always returns true for any type of object +var Always FilterFunc = func(adaptor Adaptor) bool { + return true +} + +// Any allows multiple filters to be matched against the object +type Any []Filter + +// Match returns true if any of the provided filters are true +func (m Any) Match(adaptor Adaptor) bool { + for _, m := range m { + if m.Match(adaptor) { + return true + } + } + + return false +} + +// All allows multiple filters to be matched against the object +type All []Filter + +// Match only returns true if all filters match the object +func (m All) Match(adaptor Adaptor) bool { + for _, m := range m { + if !m.Match(adaptor) { + return false + } + } + + return true +} + +type operator int + +const ( + operatorPresent = iota + operatorEqual + operatorNotEqual + operatorMatches +) + +func (op operator) String() string { + switch op { + case operatorPresent: + return "?" + case operatorEqual: + return "==" + case operatorNotEqual: + return "!=" + case operatorMatches: + return "~=" + } + + return "unknown" +} + +type selector struct { + fieldpath []string + operator operator + value string + re *regexp.Regexp +} + +func (m selector) Match(adaptor Adaptor) bool { + value, present := adaptor.Field(m.fieldpath) + + switch m.operator { + case operatorPresent: + return present + case operatorEqual: + return present && value == m.value + case operatorNotEqual: + return value != m.value + case operatorMatches: + if m.re == nil { + r, err := regexp.Compile(m.value) + if err != nil { + log.L.Errorf("error compiling regexp %q", m.value) + return false + } + + m.re = r + } + + return m.re.MatchString(value) + default: + return false + } +}
diff --git a/vendor/github.com/containerd/containerd/filters/parser.go b/vendor/github.com/containerd/containerd/filters/parser.go new file mode 100644 index 0000000000..0825d668ca --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/parser.go @@ -0,0 +1,292 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "fmt" + "io" + + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" +) + +/* +Parse the strings into a filter that may be used with an adaptor. + +The filter is made up of zero or more selectors. + +The format is a comma separated list of expressions, in the form of +`<fieldpath><operator><value>`, known as selectors. All selectors must match the +target object for the filter to be true. + +We define the operators "==" for equality, "!=" for not equal and "~=" for a +regular expression. If the operator and value are not present, the matcher will +test for the presence of a value, as defined by the target object. + +The formal grammar is as follows: + +selectors := selector ("," selector)* +selector := fieldpath (operator value) +fieldpath := field ('.' field)* +field := quoted | [A-Za-z] [A-Za-z0-9_]+ +operator := "==" | "!=" | "~=" +value := quoted | [^\s,]+ +quoted := <go string syntax> + +*/ +func Parse(s string) (Filter, error) { + // special case empty to match all + if s == "" { + return Always, nil + } + + p := parser{input: s} + return p.parse() +} + +// ParseAll parses each filter in ss and returns a filter that will return true +// if any filter matches the expression. +// +// If no filters are provided, the filter will match anything. +func ParseAll(ss ...string) (Filter, error) { + if len(ss) == 0 { + return Always, nil + } + + var fs []Filter + for _, s := range ss { + f, err := Parse(s) + if err != nil { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error()) + } + + fs = append(fs, f) + } + + return Any(fs), nil +}
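A short sketch of Parse and ParseAll in practice, including a `~=` selector whose regular expression is slash-quoted per the relaxed quoting rules later in this package; the inline adaptor is illustrative:

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/filters"
)

func main() {
	// Each string is one filter; ParseAll ORs the filters together, while
	// the comma inside a filter ANDs its selectors.
	f, err := filters.ParseAll(
		"name==foo,labels.env", // name is foo AND the env label is present
		`name~=/^bar-[0-9]+$/`, // OR name matches the slash-quoted regexp
	)
	if err != nil {
		panic(err)
	}

	// A trivial adaptor exposing only the "name" fieldpath.
	name := func(n string) filters.Adaptor {
		return filters.AdapterFunc(func(fieldpath []string) (string, bool) {
			if len(fieldpath) == 1 && fieldpath[0] == "name" {
				return n, true
			}
			return "", false
		})
	}

	fmt.Println(f.Match(name("bar-42"))) // true, via the regexp branch
	fmt.Println(f.Match(name("baz")))    // false
}
```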
+type parser struct { + input string + scanner scanner +} + +func (p *parser) parse() (Filter, error) { + p.scanner.init(p.input) + + ss, err := p.selectors() + if err != nil { + return nil, errors.Wrap(err, "filters") + } + + return ss, nil +} + +func (p *parser) selectors() (Filter, error) { + s, err := p.selector() + if err != nil { + return nil, err + } + + ss := All{s} + +loop: + for { + tok := p.scanner.peek() + switch tok { + case ',': + pos, tok, _ := p.scanner.scan() + if tok != tokenSeparator { + return nil, p.mkerr(pos, "expected a separator") + } + + s, err := p.selector() + if err != nil { + return nil, err + } + + ss = append(ss, s) + case tokenEOF: + break loop + default: + return nil, p.mkerr(p.scanner.ppos, "unexpected input: %v", string(tok)) + } + } + + return ss, nil +} + +func (p *parser) selector() (selector, error) { + fieldpath, err := p.fieldpath() + if err != nil { + return selector{}, err + } + + switch p.scanner.peek() { + case ',', tokenSeparator, tokenEOF: + return selector{ + fieldpath: fieldpath, + operator: operatorPresent, + }, nil + } + + op, err := p.operator() + if err != nil { + return selector{}, err + } + + var allowAltQuotes bool + if op == operatorMatches { + allowAltQuotes = true + } + + value, err := p.value(allowAltQuotes) + if err != nil { + if err == io.EOF { + return selector{}, io.ErrUnexpectedEOF + } + return selector{}, err + } + + return selector{ + fieldpath: fieldpath, + value: value, + operator: op, + }, nil +} + +func (p *parser) fieldpath() ([]string, error) { + f, err := p.field() + if err != nil { + return nil, err + } + + fs := []string{f} +loop: + for { + tok := p.scanner.peek() // lookahead to consume field separator + + switch tok { + case '.': + pos, tok, _ := p.scanner.scan() // consume separator + if tok != tokenSeparator { + return nil, p.mkerr(pos, "expected a field separator (`.`)") + } + + f, err := p.field() + if err != nil { + return nil, err + } + + fs = append(fs, f) + default: + // let the layer above handle the other bad cases. + break loop + } + } + + return fs, nil +} + +func (p *parser) field() (string, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, false) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected field or quoted") +} + +func (p *parser) operator() (operator, error) { + pos, tok, s := p.scanner.scan() + switch tok { + case tokenOperator: + switch s { + case "==": + return operatorEqual, nil + case "!=": + return operatorNotEqual, nil + case "~=": + return operatorMatches, nil + default: + return 0, p.mkerr(pos, "unsupported operator %q", s) + } + case tokenIllegal: + return 0, p.mkerr(pos, p.scanner.err) + } + + return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`) +} + +func (p *parser) value(allowAltQuotes bool) (string, error) { + pos, tok, s := p.scanner.scan() + + switch tok { + case tokenValue, tokenField: + return s, nil + case tokenQuoted: + return p.unquote(pos, s, allowAltQuotes) + case tokenIllegal: + return "", p.mkerr(pos, p.scanner.err) + } + + return "", p.mkerr(pos, "expected value or quoted") +} + +func (p *parser) unquote(pos int, s string, allowAlts bool) (string, error) { + if !allowAlts && s[0] != '\'' && s[0] != '"' { + return "", p.mkerr(pos, "invalid quote encountered") + } + + uq, err := unquote(s) + if err != nil { + return "", p.mkerr(pos, "unquoting failed: %v", err) + } + + return uq, nil +} + +type parseError struct { + input string + pos int + msg string +} + +func (pe parseError) Error() string { + if pe.pos < len(pe.input) { + before := pe.input[:pe.pos] + location := pe.input[pe.pos : pe.pos+1] // need to handle end + after := pe.input[pe.pos+1:] + + return fmt.Sprintf("[%s >|%s|< %s]: %v", before, location, after, pe.msg) + } + + return fmt.Sprintf("[%s]: %v", pe.input, pe.msg) +} + +func (p *parser) mkerr(pos int, format string, args ...interface{}) error { + return errors.Wrap(parseError{ + input: p.input, + pos: pos, + msg: fmt.Sprintf(format, args...), + }, "parse error") +} diff --git a/vendor/github.com/containerd/containerd/filters/quote.go b/vendor/github.com/containerd/containerd/filters/quote.go new file mode 100644 index 0000000000..2d64e23a30 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/quote.go @@ -0,0 +1,253 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package filters + +import ( + "unicode/utf8" + + "github.com/pkg/errors" +) + +// NOTE(stevvooe): Most of the code in this file is copied from the stdlib +// strconv package and modified to be able to handle quoting with `/` and `|` +// as delimiters. The copyright is held by the Go authors. + +var errQuoteSyntax = errors.New("quote syntax error") + +// UnquoteChar decodes the first character or byte in the escaped string +// or character literal represented by the string s.
+// It returns four values: +// +// 1) value, the decoded Unicode code point or byte value; +// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation; +// 3) tail, the remainder of the string after the character; and +// 4) an error that will be nil if the character is syntactically valid. +// +// The second argument, quote, specifies the type of literal being parsed +// and therefore which escaped quote character is permitted. +// If set to a single quote, it permits the sequence \' and disallows unescaped '. +// If set to a double quote, it permits \" and disallows unescaped ". +// If set to zero, it does not permit either escape and allows both quote characters to appear unescaped. +// +// This is from Go strconv package, modified to support `|` and `/` as double +// quotes for use with regular expressions. +func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) { + // easy cases + switch c := s[0]; { + case c == quote && (quote == '\'' || quote == '"' || quote == '/' || quote == '|'): + err = errQuoteSyntax + return + case c >= utf8.RuneSelf: + r, size := utf8.DecodeRuneInString(s) + return r, true, s[size:], nil + case c != '\\': + return rune(s[0]), false, s[1:], nil + } + + // hard case: c is backslash + if len(s) <= 1 { + err = errQuoteSyntax + return + } + c := s[1] + s = s[2:] + + switch c { + case 'a': + value = '\a' + case 'b': + value = '\b' + case 'f': + value = '\f' + case 'n': + value = '\n' + case 'r': + value = '\r' + case 't': + value = '\t' + case 'v': + value = '\v' + case 'x', 'u', 'U': + n := 0 + switch c { + case 'x': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + var v rune + if len(s) < n { + err = errQuoteSyntax + return + } + for j := 0; j < n; j++ { + x, ok := unhex(s[j]) + if !ok { + err = errQuoteSyntax + return + } + v = v<<4 | x + } + s = s[n:] + if c == 'x' { + // single-byte string, possibly not UTF-8 + value = v + break + } + if v > utf8.MaxRune { + err = errQuoteSyntax + return + } + value = v + multibyte = true + case '0', '1', '2', '3', '4', '5', '6', '7': + v := rune(c) - '0' + if len(s) < 2 { + err = errQuoteSyntax + return + } + for j := 0; j < 2; j++ { // one digit already; two more + x := rune(s[j]) - '0' + if x < 0 || x > 7 { + err = errQuoteSyntax + return + } + v = (v << 3) | x + } + s = s[2:] + if v > 255 { + err = errQuoteSyntax + return + } + value = v + case '\\': + value = '\\' + case '\'', '"', '|', '/': + if c != quote { + err = errQuoteSyntax + return + } + value = rune(c) + default: + err = errQuoteSyntax + return + } + tail = s + return +} + +// unquote interprets s as a single-quoted, double-quoted, +// or backquoted Go string literal, returning the string value +// that s quotes. (If s is single-quoted, it would be a Go +// character literal; Unquote returns the corresponding +// one-character string.) +// +// This is modified from the standard library to support `|` and `/` as quote +// characters for use with regular expressions. +func unquote(s string) (string, error) { + n := len(s) + if n < 2 { + return "", errQuoteSyntax + } + quote := s[0] + if quote != s[n-1] { + return "", errQuoteSyntax + } + s = s[1 : n-1] + + if quote == '`' { + if contains(s, '`') { + return "", errQuoteSyntax + } + if contains(s, '\r') { + // -1 because we know there is at least one \r to remove. 
+ buf := make([]byte, 0, len(s)-1) + for i := 0; i < len(s); i++ { + if s[i] != '\r' { + buf = append(buf, s[i]) + } + } + return string(buf), nil + } + return s, nil + } + if quote != '"' && quote != '\'' && quote != '|' && quote != '/' { + return "", errQuoteSyntax + } + if contains(s, '\n') { + return "", errQuoteSyntax + } + + // Is it trivial? Avoid allocation. + if !contains(s, '\\') && !contains(s, quote) { + switch quote { + case '"', '/', '|': // pipe and slash are treated like double quote + return s, nil + case '\'': + r, size := utf8.DecodeRuneInString(s) + if size == len(s) && (r != utf8.RuneError || size != 1) { + return s, nil + } + } + } + + var runeTmp [utf8.UTFMax]byte + buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations. + for len(s) > 0 { + c, multibyte, ss, err := unquoteChar(s, quote) + if err != nil { + return "", err + } + s = ss + if c < utf8.RuneSelf || !multibyte { + buf = append(buf, byte(c)) + } else { + n := utf8.EncodeRune(runeTmp[:], c) + buf = append(buf, runeTmp[:n]...) + } + if quote == '\'' && len(s) != 0 { + // single-quoted must be single character + return "", errQuoteSyntax + } + } + return string(buf), nil +} + +// contains reports whether the string contains the byte c. +func contains(s string, c byte) bool { + for i := 0; i < len(s); i++ { + if s[i] == c { + return true + } + } + return false +} + +func unhex(b byte) (v rune, ok bool) { + c := rune(b) + switch { + case '0' <= c && c <= '9': + return c - '0', true + case 'a' <= c && c <= 'f': + return c - 'a' + 10, true + case 'A' <= c && c <= 'F': + return c - 'A' + 10, true + } + return +} diff --git a/vendor/github.com/containerd/containerd/filters/scanner.go b/vendor/github.com/containerd/containerd/filters/scanner.go new file mode 100644 index 0000000000..6a485467b8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/filters/scanner.go @@ -0,0 +1,297 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package filters + +import ( + "unicode" + "unicode/utf8" +) + +const ( + tokenEOF = -(iota + 1) + tokenQuoted + tokenValue + tokenField + tokenSeparator + tokenOperator + tokenIllegal +) + +type token rune + +func (t token) String() string { + switch t { + case tokenEOF: + return "EOF" + case tokenQuoted: + return "Quoted" + case tokenValue: + return "Value" + case tokenField: + return "Field" + case tokenSeparator: + return "Separator" + case tokenOperator: + return "Operator" + case tokenIllegal: + return "Illegal" + } + + return string(t) +} + +func (t token) GoString() string { + return "token" + t.String() +} + +type scanner struct { + input string + pos int + ppos int // bounds the current rune in the string + value bool + err string +} + +func (s *scanner) init(input string) { + s.input = input + s.pos = 0 + s.ppos = 0 +} + +func (s *scanner) next() rune { + if s.pos >= len(s.input) { + return tokenEOF + } + s.pos = s.ppos + + r, w := utf8.DecodeRuneInString(s.input[s.ppos:]) + s.ppos += w + if r == utf8.RuneError { + if w > 0 { + s.error("rune error") + return tokenIllegal + } + return tokenEOF + } + + if r == 0 { + s.error("unexpected null") + return tokenIllegal + } + + return r +} + +func (s *scanner) peek() rune { + pos := s.pos + ppos := s.ppos + ch := s.next() + s.pos = pos + s.ppos = ppos + return ch +} + +func (s *scanner) scan() (nextp int, tk token, text string) { + var ( + ch = s.next() + pos = s.pos + ) + +chomp: + switch { + case ch == tokenEOF: + case ch == tokenIllegal: + case isQuoteRune(ch): + if !s.scanQuoted(ch) { + return pos, tokenIllegal, s.input[pos:s.ppos] + } + return pos, tokenQuoted, s.input[pos:s.ppos] + case isSeparatorRune(ch): + s.value = false + return pos, tokenSeparator, s.input[pos:s.ppos] + case isOperatorRune(ch): + s.scanOperator() + s.value = true + return pos, tokenOperator, s.input[pos:s.ppos] + case unicode.IsSpace(ch): + // chomp + ch = s.next() + pos = s.pos + goto chomp + case s.value: + s.scanValue() + s.value = false + return pos, tokenValue, s.input[pos:s.ppos] + case isFieldRune(ch): + s.scanField() + return pos, tokenField, s.input[pos:s.ppos] + } + + return s.pos, token(ch), "" +} + +func (s *scanner) scanField() { + for { + ch := s.peek() + if !isFieldRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanOperator() { + for { + ch := s.peek() + switch ch { + case '=', '!', '~': + s.next() + default: + return + } + } +} + +func (s *scanner) scanValue() { + for { + ch := s.peek() + if !isValueRune(ch) { + break + } + s.next() + } +} + +func (s *scanner) scanQuoted(quote rune) bool { + var illegal bool + ch := s.next() // read character after quote + for ch != quote { + if ch == '\n' || ch < 0 { + s.error("quoted literal not terminated") + return false + } + if ch == '\\' { + var legal bool + ch, legal = s.scanEscape(quote) + if !legal { + illegal = true + } + } else { + ch = s.next() + } + } + return !illegal +} + +func (s *scanner) scanEscape(quote rune) (ch rune, legal bool) { + ch = s.next() // read character after '/' + switch ch { + case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', quote: + // nothing to do + ch = s.next() + legal = true + case '0', '1', '2', '3', '4', '5', '6', '7': + ch, legal = s.scanDigits(ch, 8, 3) + case 'x': + ch, legal = s.scanDigits(s.next(), 16, 2) + case 'u': + ch, legal = s.scanDigits(s.next(), 16, 4) + case 'U': + ch, legal = s.scanDigits(s.next(), 16, 8) + default: + s.error("illegal escape sequence") + } + return +} + +func (s *scanner) scanDigits(ch rune, base, n int) (rune, bool) 
{ + for n > 0 && digitVal(ch) < base { + ch = s.next() + n-- + } + if n > 0 { + s.error("illegal numeric escape sequence") + return ch, false + } + return ch, true +} + +func (s *scanner) error(msg string) { + if s.err == "" { + s.err = msg + } +} + +func digitVal(ch rune) int { + switch { + case '0' <= ch && ch <= '9': + return int(ch - '0') + case 'a' <= ch && ch <= 'f': + return int(ch - 'a' + 10) + case 'A' <= ch && ch <= 'F': + return int(ch - 'A' + 10) + } + return 16 // larger than any legal digit val +} + +func isFieldRune(r rune) bool { + return (r == '_' || isAlphaRune(r) || isDigitRune(r)) +} + +func isAlphaRune(r rune) bool { + return r >= 'A' && r <= 'Z' || r >= 'a' && r <= 'z' +} + +func isDigitRune(r rune) bool { + return r >= '0' && r <= '9' +} + +func isOperatorRune(r rune) bool { + switch r { + case '=', '!', '~': + return true + } + + return false +} + +func isQuoteRune(r rune) bool { + switch r { + case '/', '|', '"': // maybe add single quoting? + return true + } + + return false +} + +func isSeparatorRune(r rune) bool { + switch r { + case ',', '.': + return true + } + + return false +} + +func isValueRune(r rune) bool { + return r != ',' && !unicode.IsSpace(r) && + (unicode.IsLetter(r) || + unicode.IsDigit(r) || + unicode.IsNumber(r) || + unicode.IsGraphic(r) || + unicode.IsPunct(r)) +} diff --git a/vendor/github.com/containerd/containerd/images/annotations.go b/vendor/github.com/containerd/containerd/images/annotations.go new file mode 100644 index 0000000000..47d92104cd --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/annotations.go @@ -0,0 +1,23 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +const ( + // AnnotationImageName is an annotation on a Descriptor in an index.json + // containing the `Name` value as used by an `Image` struct + AnnotationImageName = "io.containerd.image.name" +) diff --git a/vendor/github.com/containerd/containerd/images/diffid.go b/vendor/github.com/containerd/containerd/images/diffid.go new file mode 100644 index 0000000000..56193cc289 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/diffid.go @@ -0,0 +1,81 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package images + +import ( + "context" + "io" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/labels" + "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" +) + +// GetDiffID gets the diff ID of the layer blob descriptor. +func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) { + switch desc.MediaType { + case + // If the layer is already uncompressed, we can just return its digest + MediaTypeDockerSchema2Layer, + ocispec.MediaTypeImageLayer, + MediaTypeDockerSchema2LayerForeign, + ocispec.MediaTypeImageLayerNonDistributable: + return desc.Digest, nil + } + info, err := cs.Info(ctx, desc.Digest) + if err != nil { + return "", err + } + v, ok := info.Labels[labels.LabelUncompressed] + if ok { + // Fast path: if the image is already unpacked, we can use the label value + return digest.Parse(v) + } + // if the image is not unpacked, we may not have the label + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return "", err + } + defer ra.Close() + r := content.NewReader(ra) + uR, err := compression.DecompressStream(r) + if err != nil { + return "", err + } + defer uR.Close() + digester := digest.Canonical.Digester() + hashW := digester.Hash() + if _, err := io.Copy(hashW, uR); err != nil { + return "", err + } + if err := ra.Close(); err != nil { + return "", err + } + digest := digester.Digest() + // memoize the computed value + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[labels.LabelUncompressed] = digest.String() + if _, err := cs.Update(ctx, info, "labels"); err != nil { + logrus.WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest) + } + return digest, nil +}
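GetDiffID typically decompresses a gzipped layer only once: the result is cached as the containerd.io/uncompressed label (best effort; a failed label update only logs a warning), so later calls are label lookups. A hedged wrapper sketch; layerDiffID is an illustrative helper, not part of this package:

```go
package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// layerDiffID resolves the uncompressed digest of a layer. The first call on
// a gzipped layer decompresses it; afterwards the cached label is used.
func layerDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (digest.Digest, error) {
	return images.GetDiffID(ctx, cs, desc)
}
```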
diff --git a/vendor/github.com/containerd/containerd/images/handlers.go b/vendor/github.com/containerd/containerd/images/handlers.go new file mode 100644 index 0000000000..05a9017bc2 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/handlers.go @@ -0,0 +1,288 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "fmt" + "sort" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + "golang.org/x/sync/semaphore" +) + +var ( + // ErrSkipDesc is used to skip processing of a descriptor and + // its descendants. + ErrSkipDesc = fmt.Errorf("skip descriptor") + + // ErrStopHandler is used to signify that the descriptor + // has been handled and should not be handled further. + // This applies only to a single descriptor in a handler + // chain and does not apply to descendant descriptors. + ErrStopHandler = fmt.Errorf("stop handler") +) + +// Handler handles image manifests +type Handler interface { + Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) +} + +// HandlerFunc function implementing the Handler interface +type HandlerFunc func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) + +// Handle image manifests +func (fn HandlerFunc) Handle(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + return fn(ctx, desc) +} + +// Handlers returns a handler that will run the handlers in sequence. +// +// A handler may return `ErrStopHandler` to stop calling additional handlers +func Handlers(handlers ...Handler) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + var children []ocispec.Descriptor + for _, handler := range handlers { + ch, err := handler.Handle(ctx, desc) + if err != nil { + if errors.Is(err, ErrStopHandler) { + break + } + return nil, err + } + + children = append(children, ch...) + } + + return children, nil + } +} + +// Walk the resources of an image and call the handler for each. If the handler +// decodes the sub-resources for each image, they will be walked as well. +// +// This differs from dispatch in that each sibling resource is considered +// synchronously. +func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) error { + for _, desc := range descs { + + children, err := handler.Handle(ctx, desc) + if err != nil { + if errors.Is(err, ErrSkipDesc) { + continue // don't traverse the children. + } + return err + } + + if len(children) > 0 { + if err := Walk(ctx, handler, children...); err != nil { + return err + } + } + } + + return nil +} + +// Dispatch runs the provided handler for content specified by the descriptors. +// If the handler decodes subresources, they will be visited as well. +// +// Handlers for siblings are run in parallel on the provided descriptors. A +// handler may return `ErrSkipDesc` to signal to the dispatcher to not traverse +// any children. +// +// A concurrency limiter can be passed in to limit the number of concurrent +// handlers running. When limiter is nil, there is no limit. +// +// Typically, this function will be used with `FetchHandler`, often composed +// with other handlers. +// +// If any handler returns an error, the dispatch session will be canceled. +func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error { + eg, ctx2 := errgroup.WithContext(ctx) + for _, desc := range descs { + desc := desc + + if limiter != nil { + if err := limiter.Acquire(ctx, 1); err != nil { + return err + } + } + + eg.Go(func() error { + desc := desc + + children, err := handler.Handle(ctx2, desc) + if limiter != nil { + limiter.Release(1) + } + if err != nil { + if errors.Is(err, ErrSkipDesc) { + return nil // don't traverse the children. + } + return err + } + + if len(children) > 0 { + return Dispatch(ctx2, handler, limiter, children...) + } + + return nil + }) + } + + return eg.Wait() +}
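Walk and Dispatch traverse whatever children a handler returns, so composing them with ChildrenHandler (defined just below) yields a full image traversal. A sketch that prints every descriptor reachable for the host platform; printTree is an illustrative name:

```go
package example

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// printTree logs every descriptor reachable from root, descending only into
// children matching the host platform.
func printTree(ctx context.Context, provider content.Provider, root ocispec.Descriptor) error {
	handler := images.Handlers(
		images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
			fmt.Println(desc.MediaType, desc.Digest)
			return nil, nil
		}),
		images.FilterPlatforms(images.ChildrenHandler(provider), platforms.Default()),
	)
	return images.Walk(ctx, handler, root)
}
```

+// ChildrenHandler decodes well-known manifest types and returns their children. +// +// This is useful for supporting recursive fetch and other use cases where you +// want to do a full walk of resources.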
+// +// One can also replace this with another implementation to allow descending of +// arbitrary types. +func ChildrenHandler(provider content.Provider) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + return Children(ctx, provider, desc) + } +} + +// SetChildrenLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +func SetChildrenLabels(manager content.Manager, f HandlerFunc) HandlerFunc { + return SetChildrenMappedLabels(manager, f, nil) +} + +// SetChildrenMappedLabels is a handler wrapper which sets labels for the content on +// the children returned by the handler and passes through the children. +// Must follow a handler that returns the children to be labeled. +// The label map allows the caller to control the labels per child descriptor. +// For returned labels, the index of the child will be appended to the end +// except for the first index when the returned label does not end with '.'. +func SetChildrenMappedLabels(manager content.Manager, f HandlerFunc, labelMap func(ocispec.Descriptor) []string) HandlerFunc { + if labelMap == nil { + labelMap = ChildGCLabels + } + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + if len(children) > 0 { + var ( + info = content.Info{ + Digest: desc.Digest, + Labels: map[string]string{}, + } + fields = []string{} + keys = map[string]uint{} + ) + for _, ch := range children { + labelKeys := labelMap(ch) + for _, key := range labelKeys { + idx := keys[key] + keys[key] = idx + 1 + if idx > 0 || key[len(key)-1] == '.' { + key = fmt.Sprintf("%s%d", key, idx) + } + + info.Labels[key] = ch.Digest.String() + fields = append(fields, "labels."+key) + } + } + + _, err := manager.Update(ctx, info, fields...) + if err != nil { + return nil, err + } + } + + return children, err + } +} + +// FilterPlatforms is a handler wrapper which limits the descriptors returned +// based on matching the specified platform matcher. +func FilterPlatforms(f HandlerFunc, m platforms.Matcher) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + var descs []ocispec.Descriptor + + if m == nil { + descs = children + } else { + for _, d := range children { + if d.Platform == nil || m.Match(*d.Platform) { + descs = append(descs, d) + } + } + } + + return descs, nil + } +} + +// LimitManifests is a handler wrapper which filters the manifest descriptors +// returned using the provided platform. +// The results will be ordered according to the comparison operator and +// use the ordering in the manifests for equal matches. +// A limit of 0 or less is considered no limit. +// A not found error is returned if no manifest is matched. 
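+//
+// A typical composition, mirroring the wiring used by Image.Size elsewhere in
+// this package (a sketch; `provider` and `platform` are assumed in scope):
+//
+//	h := LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)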
+func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return children, err + } + + switch desc.MediaType { + case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: + sort.SliceStable(children, func(i, j int) bool { + if children[i].Platform == nil { + return false + } + if children[j].Platform == nil { + return true + } + return m.Less(*children[i].Platform, *children[j].Platform) + }) + + if n > 0 { + if len(children) == 0 { + return children, errors.Wrap(errdefs.ErrNotFound, "no match for platform in manifest") + } + if len(children) > n { + children = children[:n] + } + } + default: + // only limit manifests from an index + } + return children, nil + } +} diff --git a/vendor/github.com/containerd/containerd/images/image.go b/vendor/github.com/containerd/containerd/images/image.go new file mode 100644 index 0000000000..2e5cd61c9a --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/image.go @@ -0,0 +1,441 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "encoding/json" + "fmt" + "sort" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// Image provides the model for how containerd views container images. +type Image struct { + // Name of the image. + // + // To be pulled, it must be a reference compatible with resolvers. + // + // This field is required. + Name string + + // Labels provide runtime decoration for the image record. + // + // There is no default behavior for how these labels are propagated. They + // only decorate the static metadata object. + // + // This field is optional. + Labels map[string]string + + // Target describes the root content for this image. Typically, this is + // a manifest, index or manifest list. + Target ocispec.Descriptor + + CreatedAt, UpdatedAt time.Time +} + +// DeleteOptions provide options on image delete +type DeleteOptions struct { + Synchronous bool +} + +// DeleteOpt allows configuring a delete operation +type DeleteOpt func(context.Context, *DeleteOptions) error + +// SynchronousDelete is used to indicate that an image deletion and removal of +// the image resources should occur synchronously before returning a result. 
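+//
+// For example (a sketch; `store` is any Store implementation and the image
+// name is illustrative):
+//
+//	err := store.Delete(ctx, "docker.io/library/alpine:latest", SynchronousDelete())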
+func SynchronousDelete() DeleteOpt { + return func(ctx context.Context, o *DeleteOptions) error { + o.Synchronous = true + return nil + } +} + +// Store and interact with images +type Store interface { + Get(ctx context.Context, name string) (Image, error) + List(ctx context.Context, filters ...string) ([]Image, error) + Create(ctx context.Context, image Image) (Image, error) + + // Update will replace the data in the store with the provided image. If + // one or more fieldpaths are provided, only those fields will be updated. + Update(ctx context.Context, image Image, fieldpaths ...string) (Image, error) + + Delete(ctx context.Context, name string, opts ...DeleteOpt) error +} + +// TODO(stevvooe): Many of these functions make strong platform assumptions, +// which are untrue in a lot of cases. More refactoring must be done here to +// make this work in all cases. + +// Config resolves the image configuration descriptor. +// +// The caller can then use the descriptor to resolve and process the +// configuration of the image. +func (image *Image) Config(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (ocispec.Descriptor, error) { + return Config(ctx, provider, image.Target, platform) +} + +// RootFS returns the unpacked diffids that make up and images rootfs. +// +// These are used to verify that a set of layers unpacked to the expected +// values. +func (image *Image) RootFS(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) ([]digest.Digest, error) { + desc, err := image.Config(ctx, provider, platform) + if err != nil { + return nil, err + } + return RootFS(ctx, provider, desc) +} + +// Size returns the total size of an image's packed resources. +func (image *Image) Size(ctx context.Context, provider content.Provider, platform platforms.MatchComparer) (int64, error) { + var size int64 + return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if desc.Size < 0 { + return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType) + } + size += desc.Size + return nil, nil + }), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target) +} + +type platformManifest struct { + p *ocispec.Platform + m *ocispec.Manifest +} + +// Manifest resolves a manifest from the image for the given platform. +// +// When a manifest descriptor inside of a manifest index does not have +// a platform defined, the platform from the image config is considered. +// +// If the descriptor points to a non-index manifest, then the manifest is +// unmarshalled and returned without considering the platform inside of the +// config. +// +// TODO(stevvooe): This violates the current platform agnostic approach to this +// package by returning a specific manifest type. 
We'll need to refactor this +// to return a manifest descriptor or decide that we want to bring the API in +// this direction because this abstraction is not needed.` +func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) { + var ( + limit = 1 + m []platformManifest + wasIndex bool + ) + + if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest) + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + if desc.Digest != image.Digest && platform != nil { + if desc.Platform != nil && !platform.Match(*desc.Platform) { + return nil, nil + } + + if desc.Platform == nil { + p, err := content.ReadBlob(ctx, provider, manifest.Config) + if err != nil { + return nil, err + } + + var image ocispec.Image + if err := json.Unmarshal(p, &image); err != nil { + return nil, err + } + + if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) { + return nil, nil + } + + } + } + + m = append(m, platformManifest{ + p: desc.Platform, + m: &manifest, + }) + + return nil, nil + case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest) + } + + var idx ocispec.Index + if err := json.Unmarshal(p, &idx); err != nil { + return nil, err + } + + if platform == nil { + return idx.Manifests, nil + } + + var descs []ocispec.Descriptor + for _, d := range idx.Manifests { + if d.Platform == nil || platform.Match(*d.Platform) { + descs = append(descs, d) + } + } + + sort.SliceStable(descs, func(i, j int) bool { + if descs[i].Platform == nil { + return false + } + if descs[j].Platform == nil { + return true + } + return platform.Less(*descs[i].Platform, *descs[j].Platform) + }) + + wasIndex = true + + if len(descs) > limit { + return descs[:limit], nil + } + return descs, nil + } + return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest) + }), image); err != nil { + return ocispec.Manifest{}, err + } + + if len(m) == 0 { + err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest) + if wasIndex { + err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest) + } + return ocispec.Manifest{}, err + } + return *m[0].m, nil +} + +// Config resolves the image configuration descriptor using a content provided +// to resolve child resources on the image. +// +// The caller can then use the descriptor to resolve and process the +// configuration of the image. +func Config(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Descriptor, error) { + manifest, err := Manifest(ctx, provider, image, platform) + if err != nil { + return ocispec.Descriptor{}, err + } + return manifest.Config, err +} + +// Platforms returns one or more platforms supported by the image. 
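+//
+// For example (a sketch; `provider` and `img` follow the conventions used
+// throughout this package):
+//
+//	ps, err := Platforms(ctx, provider, img.Target)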
+func Platforms(ctx context.Context, provider content.Provider, image ocispec.Descriptor) ([]ocispec.Platform, error) { + var platformSpecs []ocispec.Platform + return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + if desc.Platform != nil { + platformSpecs = append(platformSpecs, *desc.Platform) + return nil, ErrSkipDesc + } + + switch desc.MediaType { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var image ocispec.Image + if err := json.Unmarshal(p, &image); err != nil { + return nil, err + } + + platformSpecs = append(platformSpecs, + platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) + } + return nil, nil + }), ChildrenHandler(provider)), image) +} + +// Check returns nil if the all components of an image are available in the +// provider for the specified platform. +// +// If available is true, the caller can assume that required represents the +// complete set of content required for the image. +// +// missing will have the components that are part of required but not available +// in the provider. +// +// If there is a problem resolving content, an error will be returned. +func Check(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (available bool, required, present, missing []ocispec.Descriptor, err error) { + mfst, err := Manifest(ctx, provider, image, platform) + if err != nil { + if errdefs.IsNotFound(err) { + return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil + } + + return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest) + } + + // TODO(stevvooe): It is possible that referenced conponents could have + // children, but this is rare. For now, we ignore this and only verify + // that manifest components are present. + required = append([]ocispec.Descriptor{mfst.Config}, mfst.Layers...) + + for _, desc := range required { + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + if errdefs.IsNotFound(err) { + missing = append(missing, desc) + continue + } else { + return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest) + } + } + ra.Close() + present = append(present, desc) + + } + + return true, required, present, missing, nil +} + +// Children returns the immediate children of content described by the descriptor. +func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + var descs []ocispec.Descriptor + switch desc.MediaType { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest) + } + + // TODO(stevvooe): We just assume oci manifest, for now. There may be + // subtle differences from the docker version. + var manifest ocispec.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + descs = append(descs, manifest.Config) + descs = append(descs, manifest.Layers...) 
+ case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + if err := validateMediaType(p, desc.MediaType); err != nil { + return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest) + } + + var index ocispec.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + descs = append(descs, index.Manifests...) + default: + if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) { + // childless data types. + return nil, nil + } + log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType) + } + + return descs, nil +} + +// unknownDocument represents a manifest, manifest list, or index that has not +// yet been validated. +type unknownDocument struct { + MediaType string `json:"mediaType,omitempty"` + Config json.RawMessage `json:"config,omitempty"` + Layers json.RawMessage `json:"layers,omitempty"` + Manifests json.RawMessage `json:"manifests,omitempty"` + FSLayers json.RawMessage `json:"fsLayers,omitempty"` // schema 1 +} + +// validateMediaType returns an error if the byte slice is invalid JSON or if +// the media type identifies the blob as one format but it contains elements of +// another format. +func validateMediaType(b []byte, mt string) error { + var doc unknownDocument + if err := json.Unmarshal(b, &doc); err != nil { + return err + } + if len(doc.FSLayers) != 0 { + return fmt.Errorf("media-type: schema 1 not supported") + } + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + if len(doc.Manifests) != 0 || + doc.MediaType == MediaTypeDockerSchema2ManifestList || + doc.MediaType == ocispec.MediaTypeImageIndex { + return fmt.Errorf("media-type: expected manifest but found index (%s)", mt) + } + case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + if len(doc.Config) != 0 || len(doc.Layers) != 0 || + doc.MediaType == MediaTypeDockerSchema2Manifest || + doc.MediaType == ocispec.MediaTypeImageManifest { + return fmt.Errorf("media-type: expected index but found manifest (%s)", mt) + } + } + return nil +} + +// RootFS returns the unpacked diffids that make up and images rootfs. +// +// These are used to verify that a set of layers unpacked to the expected +// values. +func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) ([]digest.Digest, error) { + p, err := content.ReadBlob(ctx, provider, configDesc) + if err != nil { + return nil, err + } + + var config ocispec.Image + if err := json.Unmarshal(p, &config); err != nil { + return nil, err + } + return config.RootFS.DiffIDs, nil +} diff --git a/vendor/github.com/containerd/containerd/images/importexport.go b/vendor/github.com/containerd/containerd/images/importexport.go new file mode 100644 index 0000000000..843adcadc7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/importexport.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "io" + + "github.com/containerd/containerd/content" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +// Importer is the interface for image importer. +type Importer interface { + // Import imports an image from a tar stream. + Import(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) +} + +// Exporter is the interface for image exporter. +type Exporter interface { + // Export exports an image to a tar stream. + Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error +} diff --git a/vendor/github.com/containerd/containerd/images/mediatypes.go b/vendor/github.com/containerd/containerd/images/mediatypes.go new file mode 100644 index 0000000000..785d71291e --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/mediatypes.go @@ -0,0 +1,204 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package images + +import ( + "context" + "sort" + "strings" + + "github.com/containerd/containerd/errdefs" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// mediatype definitions for image components handled in containerd. +// +// oci components are generally referenced directly, although we may centralize +// here for clarity. 
+const ( + MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar" + MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar" + MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip" + MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" + MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json" + MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json" + MediaTypeDockerSchema2ManifestList = "application/vnd.docker.distribution.manifest.list.v2+json" + // Checkpoint/Restore Media Types + MediaTypeContainerd1Checkpoint = "application/vnd.containerd.container.criu.checkpoint.criu.tar" + MediaTypeContainerd1CheckpointPreDump = "application/vnd.containerd.container.criu.checkpoint.predump.tar" + MediaTypeContainerd1Resource = "application/vnd.containerd.container.resource.tar" + MediaTypeContainerd1RW = "application/vnd.containerd.container.rw.tar" + MediaTypeContainerd1CheckpointConfig = "application/vnd.containerd.container.checkpoint.config.v1+proto" + MediaTypeContainerd1CheckpointOptions = "application/vnd.containerd.container.checkpoint.options.v1+proto" + MediaTypeContainerd1CheckpointRuntimeName = "application/vnd.containerd.container.checkpoint.runtime.name" + MediaTypeContainerd1CheckpointRuntimeOptions = "application/vnd.containerd.container.checkpoint.runtime.options+proto" + // Legacy Docker schema1 manifest + MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" + // Encypted media types + MediaTypeImageLayerEncrypted = ocispec.MediaTypeImageLayer + "+encrypted" + MediaTypeImageLayerGzipEncrypted = ocispec.MediaTypeImageLayerGzip + "+encrypted" +) + +// DiffCompression returns the compression as defined by the layer diff media +// type. For Docker media types without compression, "unknown" is returned to +// indicate that the media type may be compressed. If the media type is not +// recognized as a layer diff, then it returns errdefs.ErrNotImplemented +func DiffCompression(ctx context.Context, mediaType string) (string, error) { + base, ext := parseMediaTypes(mediaType) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + // These media types may have been compressed but failed to + // use the correct media type. The decompression function + // should detect and handle this case. + return "unknown", nil + case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip: + if len(ext) > 0 { + // Type is wrapped + return "", nil + } + return "gzip", nil + case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: + if len(ext) > 0 { + switch ext[len(ext)-1] { + case "gzip": + return "gzip", nil + case "zstd": + return "zstd", nil + } + } + return "", nil + default: + return "", errors.Wrapf(errdefs.ErrNotImplemented, "unrecognised mediatype %s", mediaType) + } +} + +// parseMediaTypes splits the media type into the base type and +// an array of sorted extensions +func parseMediaTypes(mt string) (string, []string) { + if mt == "" { + return "", []string{} + } + + s := strings.Split(mt, "+") + ext := s[1:] + sort.Strings(ext) + + return s[0], ext +} + +// IsNonDistributable returns true if the media type is non-distributable. 
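+//
+// For example, Docker foreign layers report true (illustrative):
+//
+//	IsNonDistributable("application/vnd.docker.image.rootfs.foreign.diff.tar.gzip") // true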
+func IsNonDistributable(mt string) bool { + return strings.HasPrefix(mt, "application/vnd.oci.image.layer.nondistributable.") || + strings.HasPrefix(mt, "application/vnd.docker.image.rootfs.foreign.") +} + +// IsLayerType returns true if the media type is a layer +func IsLayerType(mt string) bool { + if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") { + return true + } + + // Parse Docker media types, strip off any + suffixes first + base, _ := parseMediaTypes(mt) + switch base { + case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip, + MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip: + return true + } + return false +} + +// IsDockerType returns true if the media type has "application/vnd.docker." prefix +func IsDockerType(mt string) bool { + return strings.HasPrefix(mt, "application/vnd.docker.") +} + +// IsManifestType returns true if the media type is an OCI-compatible manifest. +// No support for schema1 manifest. +func IsManifestType(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + return true + default: + return false + } +} + +// IsIndexType returns true if the media type is an OCI-compatible index. +func IsIndexType(mt string) bool { + switch mt { + case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList: + return true + default: + return false + } +} + +// IsConfigType returns true if the media type is an OCI-compatible image config. +// No support for containerd checkpoint configs. +func IsConfigType(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig: + return true + default: + return false + } +} + +// IsKnownConfig returns true if the media type is a known config type, +// including containerd checkpoint configs +func IsKnownConfig(mt string) bool { + switch mt { + case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig, + MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig: + return true + } + return false +} + +// ChildGCLabels returns the label for a given descriptor to reference it +func ChildGCLabels(desc ocispec.Descriptor) []string { + mt := desc.MediaType + if IsKnownConfig(mt) { + return []string{"containerd.io/gc.ref.content.config"} + } + + switch mt { + case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + return []string{"containerd.io/gc.ref.content.m."} + } + + if IsLayerType(mt) { + return []string{"containerd.io/gc.ref.content.l."} + } + + return []string{"containerd.io/gc.ref.content."} +} + +// ChildGCLabelsFilterLayers returns the labels for a given descriptor to +// reference it, skipping layer media types +func ChildGCLabelsFilterLayers(desc ocispec.Descriptor) []string { + if IsLayerType(desc.MediaType) { + return nil + } + return ChildGCLabels(desc) +} diff --git a/vendor/github.com/containerd/containerd/labels/labels.go b/vendor/github.com/containerd/containerd/labels/labels.go new file mode 100644 index 0000000000..d76ff2cf9c --- /dev/null +++ b/vendor/github.com/containerd/containerd/labels/labels.go @@ -0,0 +1,21 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package labels + +// LabelUncompressed is added to compressed layer contents. +// The value is digest of the uncompressed content. +const LabelUncompressed = "containerd.io/uncompressed" diff --git a/vendor/github.com/containerd/containerd/labels/validate.go b/vendor/github.com/containerd/containerd/labels/validate.go new file mode 100644 index 0000000000..0de461663a --- /dev/null +++ b/vendor/github.com/containerd/containerd/labels/validate.go @@ -0,0 +1,37 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package labels + +import ( + "github.com/containerd/containerd/errdefs" + "github.com/pkg/errors" +) + +const ( + maxSize = 4096 +) + +// Validate a label's key and value are under 4096 bytes +func Validate(k, v string) error { + if (len(k) + len(v)) > maxSize { + if len(k) > 10 { + k = k[:10] + } + return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k) + } + return nil +} diff --git a/vendor/github.com/containerd/containerd/log/context.go b/vendor/github.com/containerd/containerd/log/context.go new file mode 100644 index 0000000000..37b6a7d1c8 --- /dev/null +++ b/vendor/github.com/containerd/containerd/log/context.go @@ -0,0 +1,68 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package log + +import ( + "context" + + "github.com/sirupsen/logrus" +) + +var ( + // G is an alias for GetLogger. + // + // We may want to define this locally to a package to get package tagged log + // messages. + G = GetLogger + + // L is an alias for the standard logger. + L = logrus.NewEntry(logrus.StandardLogger()) +) + +type ( + loggerKey struct{} +) + +const ( + // RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to + // ensure the formatted time is always the same number of characters. 
+ RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" + + // TextFormat represents the text logging format + TextFormat = "text" + + // JSONFormat represents the JSON logging format + JSONFormat = "json" +) + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) *logrus.Entry { + logger := ctx.Value(loggerKey{}) + + if logger == nil { + return L + } + + return logger.(*logrus.Entry) +} diff --git a/vendor/github.com/containerd/containerd/platforms/compare.go b/vendor/github.com/containerd/containerd/platforms/compare.go new file mode 100644 index 0000000000..c7657e1869 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/compare.go @@ -0,0 +1,193 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "strconv" + "strings" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// MatchComparer is able to match and compare platforms to +// filter and sort platforms. +type MatchComparer interface { + Matcher + + Less(specs.Platform, specs.Platform) bool +} + +// platformVector returns an (ordered) vector of appropriate specs.Platform +// objects to try matching for the given platform object (see platforms.Only). +func platformVector(platform specs.Platform) []specs.Platform { + vector := []specs.Platform{platform} + + switch platform.Architecture { + case "amd64": + vector = append(vector, specs.Platform{ + Architecture: "386", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: platform.Variant, + }) + case "arm": + if armVersion, err := strconv.Atoi(strings.TrimPrefix(platform.Variant, "v")); err == nil && armVersion > 5 { + for armVersion--; armVersion >= 5; armVersion-- { + vector = append(vector, specs.Platform{ + Architecture: platform.Architecture, + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: "v" + strconv.Itoa(armVersion), + }) + } + } + case "arm64": + variant := platform.Variant + if variant == "" { + variant = "v8" + } + vector = append(vector, platformVector(specs.Platform{ + Architecture: "arm", + OS: platform.OS, + OSVersion: platform.OSVersion, + OSFeatures: platform.OSFeatures, + Variant: variant, + })...) + } + + return vector +} + +// Only returns a match comparer for a single platform +// using default resolution logic for the platform. 
+// +// For arm/v8, will also match arm/v7, arm/v6 and arm/v5 +// For arm/v7, will also match arm/v6 and arm/v5 +// For arm/v6, will also match arm/v5 +// For amd64, will also match 386 +func Only(platform specs.Platform) MatchComparer { + return Ordered(platformVector(Normalize(platform))...) +} + +// OnlyStrict returns a match comparer for a single platform. +// +// Unlike Only, OnlyStrict does not match sub platforms. +// So, "arm/vN" will not match "arm/vM" where M < N, +// and "amd64" will not also match "386". +// +// OnlyStrict matches non-canonical forms. +// So, "arm64" matches "arm/64/v8". +func OnlyStrict(platform specs.Platform) MatchComparer { + return Ordered(Normalize(platform)) +} + +// Ordered returns a platform MatchComparer which matches any of the platforms +// but orders them in order they are provided. +func Ordered(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return orderedPlatformComparer{ + matchers: matchers, + } +} + +// Any returns a platform MatchComparer which matches any of the platforms +// with no preference for ordering. +func Any(platforms ...specs.Platform) MatchComparer { + matchers := make([]Matcher, len(platforms)) + for i := range platforms { + matchers[i] = NewMatcher(platforms[i]) + } + return anyPlatformComparer{ + matchers: matchers, + } +} + +// All is a platform MatchComparer which matches all platforms +// with preference for ordering. +var All MatchComparer = allPlatformComparer{} + +type orderedPlatformComparer struct { + matchers []Matcher +} + +func (c orderedPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c orderedPlatformComparer) Less(p1 specs.Platform, p2 specs.Platform) bool { + for _, m := range c.matchers { + p1m := m.Match(p1) + p2m := m.Match(p2) + if p1m && !p2m { + return true + } + if p1m || p2m { + return false + } + } + return false +} + +type anyPlatformComparer struct { + matchers []Matcher +} + +func (c anyPlatformComparer) Match(platform specs.Platform) bool { + for _, m := range c.matchers { + if m.Match(platform) { + return true + } + } + return false +} + +func (c anyPlatformComparer) Less(p1, p2 specs.Platform) bool { + var p1m, p2m bool + for _, m := range c.matchers { + if !p1m && m.Match(p1) { + p1m = true + } + if !p2m && m.Match(p2) { + p2m = true + } + if p1m && p2m { + return false + } + } + // If one matches, and the other does, sort match first + return p1m && !p2m +} + +type allPlatformComparer struct{} + +func (allPlatformComparer) Match(specs.Platform) bool { + return true +} + +func (allPlatformComparer) Less(specs.Platform, specs.Platform) bool { + return false +} diff --git a/vendor/github.com/containerd/containerd/platforms/cpuinfo.go b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go new file mode 100644 index 0000000000..4a7177e313 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/cpuinfo.go @@ -0,0 +1,131 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "bufio" + "os" + "runtime" + "strings" + "sync" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +// Present the ARM instruction set architecture, eg: v7, v8 +// Don't use this value directly; call cpuVariant() instead. +var cpuVariantValue string + +var cpuVariantOnce sync.Once + +func cpuVariant() string { + cpuVariantOnce.Do(func() { + if isArmArch(runtime.GOARCH) { + cpuVariantValue = getCPUVariant() + } + }) + return cpuVariantValue +} + +// For Linux, the kernel has already detected the ABI, ISA and Features. +// So we don't need to access the ARM registers to detect platform information +// by ourselves. We can just parse these information from /proc/cpuinfo +func getCPUInfo(pattern string) (info string, err error) { + if !isLinuxOS(runtime.GOOS) { + return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS) + } + + cpuinfo, err := os.Open("/proc/cpuinfo") + if err != nil { + return "", err + } + defer cpuinfo.Close() + + // Start to Parse the Cpuinfo line by line. For SMP SoC, we parse + // the first core is enough. + scanner := bufio.NewScanner(cpuinfo) + for scanner.Scan() { + newline := scanner.Text() + list := strings.Split(newline, ":") + + if len(list) > 1 && strings.EqualFold(strings.TrimSpace(list[0]), pattern) { + return strings.TrimSpace(list[1]), nil + } + } + + // Check whether the scanner encountered errors + err = scanner.Err() + if err != nil { + return "", err + } + + return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern) +} + +func getCPUVariant() string { + if runtime.GOOS == "windows" || runtime.GOOS == "darwin" { + // Windows/Darwin only supports v7 for ARM32 and v8 for ARM64 and so we can use + // runtime.GOARCH to determine the variants + var variant string + switch runtime.GOARCH { + case "arm64": + variant = "v8" + case "arm": + variant = "v7" + default: + variant = "unknown" + } + + return variant + } + + variant, err := getCPUInfo("Cpu architecture") + if err != nil { + log.L.WithError(err).Error("failure getting variant") + return "" + } + + // handle edge case for Raspberry Pi ARMv6 devices (which due to a kernel quirk, report "CPU architecture: 7") + // https://www.raspberrypi.org/forums/viewtopic.php?t=12614 + if runtime.GOARCH == "arm" && variant == "7" { + model, err := getCPUInfo("model name") + if err == nil && strings.HasPrefix(strings.ToLower(model), "armv6-compatible") { + variant = "6" + } + } + + switch strings.ToLower(variant) { + case "8", "aarch64": + variant = "v8" + case "7", "7m", "?(12)", "?(13)", "?(14)", "?(15)", "?(16)", "?(17)": + variant = "v7" + case "6", "6tej": + variant = "v6" + case "5", "5t", "5te", "5tej": + variant = "v5" + case "4", "4t": + variant = "v4" + case "3": + variant = "v3" + default: + variant = "unknown" + } + + return variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/database.go b/vendor/github.com/containerd/containerd/platforms/database.go new file mode 100644 index 
0000000000..6ede94061e --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/database.go @@ -0,0 +1,114 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + "strings" +) + +// isLinuxOS returns true if the operating system is Linux. +// +// The OS value should be normalized before calling this function. +func isLinuxOS(os string) bool { + return os == "linux" +} + +// These function are generated from https://golang.org/src/go/build/syslist.go. +// +// We use switch statements because they are slightly faster than map lookups +// and use a little less memory. + +// isKnownOS returns true if we know about the operating system. +// +// The OS value should be normalized before calling this function. +func isKnownOS(os string) bool { + switch os { + case "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "js", "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos": + return true + } + return false +} + +// isArmArch returns true if the architecture is ARM. +// +// The arch value should be normalized before being passed to this function. +func isArmArch(arch string) bool { + switch arch { + case "arm", "arm64": + return true + } + return false +} + +// isKnownArch returns true if we know about the architecture. +// +// The arch value should be normalized before being passed to this function. +func isKnownArch(arch string) bool { + switch arch { + case "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm": + return true + } + return false +} + +func normalizeOS(os string) string { + if os == "" { + return runtime.GOOS + } + os = strings.ToLower(os) + + switch os { + case "macos": + os = "darwin" + } + return os +} + +// normalizeArch normalizes the architecture. +func normalizeArch(arch, variant string) (string, string) { + arch, variant = strings.ToLower(arch), strings.ToLower(variant) + switch arch { + case "i386": + arch = "386" + variant = "" + case "x86_64", "x86-64": + arch = "amd64" + variant = "" + case "aarch64", "arm64": + arch = "arm64" + switch variant { + case "8", "v8": + variant = "" + } + case "armhf": + arch = "arm" + variant = "v7" + case "armel": + arch = "arm" + variant = "v6" + case "arm": + switch variant { + case "", "7": + variant = "v7" + case "5", "6", "8": + variant = "v" + variant + } + } + + return arch, variant +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults.go b/vendor/github.com/containerd/containerd/platforms/defaults.go new file mode 100644 index 0000000000..cb77fbc9f7 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults.go @@ -0,0 +1,43 @@ +/* + Copyright The containerd Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +import ( + "runtime" + + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// DefaultString returns the default string specifier for the platform. +func DefaultString() string { + return Format(DefaultSpec()) +} + +// DefaultSpec returns the current platform's default platform specification. +func DefaultSpec() specs.Platform { + return specs.Platform{ + OS: runtime.GOOS, + Architecture: runtime.GOARCH, + // The Variant field will be empty if arch != ARM. + Variant: cpuVariant(), + } +} + +// DefaultStrict returns strict form of Default. +func DefaultStrict() MatchComparer { + return OnlyStrict(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_unix.go b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go new file mode 100644 index 0000000000..e8a7d5ffa0 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_unix.go @@ -0,0 +1,24 @@ +// +build !windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package platforms + +// Default returns the default matcher for the platform. +func Default() MatchComparer { + return Only(DefaultSpec()) +} diff --git a/vendor/github.com/containerd/containerd/platforms/defaults_windows.go b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go new file mode 100644 index 0000000000..0c380e3b7c --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/defaults_windows.go @@ -0,0 +1,81 @@ +// +build windows + +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package platforms + +import ( + "fmt" + "runtime" + "strconv" + "strings" + + imagespec "github.com/opencontainers/image-spec/specs-go/v1" + specs "github.com/opencontainers/image-spec/specs-go/v1" + "golang.org/x/sys/windows" +) + +type matchComparer struct { + defaults Matcher + osVersionPrefix string +} + +// Match matches platform with the same windows major, minor +// and build version. +func (m matchComparer) Match(p imagespec.Platform) bool { + if m.defaults.Match(p) { + // TODO(windows): Figure out whether OSVersion is deprecated. + return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) + } + return false +} + +// Less sorts matched platforms in front of other platforms. +// For matched platforms, it puts platforms with larger revision +// number in front. +func (m matchComparer) Less(p1, p2 imagespec.Platform) bool { + m1, m2 := m.Match(p1), m.Match(p2) + if m1 && m2 { + r1, r2 := revision(p1.OSVersion), revision(p2.OSVersion) + return r1 > r2 + } + return m1 && !m2 +} + +func revision(v string) int { + parts := strings.Split(v, ".") + if len(parts) < 4 { + return 0 + } + r, err := strconv.Atoi(parts[3]) + if err != nil { + return 0 + } + return r +} + +// Default returns the current platform's default platform specification. +func Default() MatchComparer { + major, minor, build := windows.RtlGetNtVersionNumbers() + return matchComparer{ + defaults: Ordered(DefaultSpec(), specs.Platform{ + OS: "linux", + Architecture: runtime.GOARCH, + }), + osVersionPrefix: fmt.Sprintf("%d.%d.%d", major, minor, build), + } +} diff --git a/vendor/github.com/containerd/containerd/platforms/platforms.go b/vendor/github.com/containerd/containerd/platforms/platforms.go new file mode 100644 index 0000000000..088bdea050 --- /dev/null +++ b/vendor/github.com/containerd/containerd/platforms/platforms.go @@ -0,0 +1,278 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package platforms provides a toolkit for normalizing, matching and +// specifying container platforms. +// +// Centered around OCI platform specifications, we define a string-based +// specifier syntax that can be used for user input. With a specifier, users +// only need to specify the parts of the platform that are relevant to their +// context, providing an operating system or architecture or both. +// +// How do I use this package? +// +// The vast majority of use cases should simply use the match function with +// user input. The first step is to parse a specifier into a matcher: +// +// m, err := Parse("linux") +// if err != nil { ... } +// +// Once you have a matcher, use it to match against the platform declared by a +// component, typically from an image or runtime. 
Since extracting an image's
+// platform is a little more involved, we'll use an example against the
+// platform default:
+//
+//	if ok := m.Match(Default()); !ok { /* doesn't match */ }
+//
+// This can be composed in loops for resolving runtimes or used as a filter for
+// fetch and select images.
+//
+// More details of the specifier syntax and platform spec follow.
+//
+// Declaring Platform Support
+//
+// Components that have strict platform requirements should use the OCI
+// platform specification to declare their support. Typically, this will be
+// images and runtimes that should declare specifically which platforms they
+// support. This looks roughly as follows:
+//
+//	type Platform struct {
+//		Architecture string
+//		OS           string
+//		Variant      string
+//	}
+//
+// Most images and runtimes should at least set Architecture and OS, according
+// to their GOARCH and GOOS values, respectively (follow the OCI image
+// specification when in doubt). ARM should set the variant under certain
+// conditions, which are outlined below.
+//
+// Platform Specifiers
+//
+// While the OCI platform specifications provide a tool for components to
+// specify structured information, user input typically doesn't need the full
+// context and much can be inferred. To solve this problem, we introduced
+// "specifiers". A specifier has the format
+// `<os>|<arch>|<os>/<arch>[/<variant>]`. The user can provide either the
+// operating system or the architecture or both.
+//
+// An example of a common specifier is `linux/amd64`. If the host has a default
+// runtime that matches this, the user can simply provide the component that
+// matters. For example, if an image provides amd64 and arm64 support, the
+// operating system, `linux`, can be inferred, so they only have to provide
+// `arm64` or `amd64`. Similar behavior is implemented for operating systems,
+// where the architecture may be known but a runtime may support images from
+// different operating systems.
+//
+// Normalization
+//
+// Because not all users are familiar with the way the Go runtime represents
+// platforms, several normalizations have been provided to make this package
+// easier to use.
+//
+// The following are performed for architectures:
+//
+//	Value    Normalized
+//	aarch64  arm64
+//	armhf    arm
+//	armel    arm/v6
+//	i386     386
+//	x86_64   amd64
+//	x86-64   amd64
+//
+// We also normalize the operating system `macos` to `darwin`.
+//
+// ARM Support
+//
+// To qualify ARM architecture, the Variant field is used to qualify the arm
+// version. The most common arm version, v7, is represented without the variant
+// unless it is explicitly provided. This is treated as equivalent to armhf. A
+// previous architecture, armel, will be normalized to arm/v6.
+//
+// While these normalizations are provided, their support on arm platforms has
+// not yet been fully implemented and tested.
+package platforms
+
+import (
+	"regexp"
+	"runtime"
+	"strconv"
+	"strings"
+
+	"github.com/containerd/containerd/errdefs"
+	specs "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/pkg/errors"
+)
+
+var (
+	specifierRe = regexp.MustCompile(`^[A-Za-z0-9_-]+$`)
+)
+
+// Matcher matches platform specifications, provided by an image or runtime.
+type Matcher interface {
+	Match(platform specs.Platform) bool
+}
+
+// NewMatcher returns a simple matcher based on the provided platform
+// specification. The returned matcher only looks for equality based on os,
+// architecture and variant.
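+//
+// For example (a sketch; normalization makes the explicit v8 variant
+// equivalent to plain arm64):
+//
+//	m := NewMatcher(specs.Platform{OS: "linux", Architecture: "arm64"})
+//	ok := m.Match(specs.Platform{OS: "linux", Architecture: "arm64", Variant: "v8"}) // ok == true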
+//
+// One may implement their own matcher if this doesn't provide the required
+// functionality.
+//
+// Applications should opt to use `Match` over directly parsing specifiers.
+func NewMatcher(platform specs.Platform) Matcher {
+	return &matcher{
+		Platform: Normalize(platform),
+	}
+}
+
+type matcher struct {
+	specs.Platform
+}
+
+func (m *matcher) Match(platform specs.Platform) bool {
+	normalized := Normalize(platform)
+	return m.OS == normalized.OS &&
+		m.Architecture == normalized.Architecture &&
+		m.Variant == normalized.Variant
+}
+
+func (m *matcher) String() string {
+	return Format(m.Platform)
+}
+
+// Parse parses the platform specifier syntax into a platform declaration.
+//
+// Platform specifiers are in the format `<os>|<arch>|<os>/<arch>[/<variant>]`.
+// The minimum required information for a platform specifier is the operating
+// system or architecture. If there is only a single string (no slashes), the
+// value will be matched against the known set of operating systems, then fall
+// back to the known set of architectures. The missing component will be
+// inferred based on the local environment.
+func Parse(specifier string) (specs.Platform, error) {
+	if strings.Contains(specifier, "*") {
+		// TODO(stevvooe): need to work out exact wildcard handling
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
+	}
+
+	parts := strings.Split(specifier, "/")
+
+	for _, part := range parts {
+		if !specifierRe.MatchString(part) {
+			return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
+		}
+	}
+
+	var p specs.Platform
+	switch len(parts) {
+	case 1:
+		// in this case, we will test that the value might be an OS, then look
+		// it up. If it is not known, we'll treat it as an architecture. Since
+		// we have very little information about the platform here, we are
+		// going to be a little more strict if we don't know about the argument
+		// value.
+		p.OS = normalizeOS(parts[0])
+		if isKnownOS(p.OS) {
+			// picks a default architecture
+			p.Architecture = runtime.GOARCH
+			if p.Architecture == "arm" && cpuVariant() != "v7" {
+				p.Variant = cpuVariant()
+			}
+
+			return p, nil
+		}
+
+		p.Architecture, p.Variant = normalizeArch(parts[0], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+		if isKnownArch(p.Architecture) {
+			p.OS = runtime.GOOS
+			return p, nil
+		}
+
+		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
+	case 2:
+		// In this case, we treat as a regular os/arch pair. We don't care
+		// about whether or not we know of the platform.
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], "")
+		if p.Architecture == "arm" && p.Variant == "v7" {
+			p.Variant = ""
+		}
+
+		return p, nil
+	case 3:
+		// we have a fully specified variant, this is rare
+		p.OS = normalizeOS(parts[0])
+		p.Architecture, p.Variant = normalizeArch(parts[1], parts[2])
+		if p.Architecture == "arm64" && p.Variant == "" {
+			p.Variant = "v8"
+		}
+
+		return p, nil
+	}
+
+	return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
+}
+
+// MustParse is like Parse but panics if the specifier cannot be parsed.
+// Simplifies initialization of global variables.
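+//
+// For example:
+//
+//	var linuxAmd64 = MustParse("linux/amd64")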
+func MustParse(specifier string) specs.Platform { + p, err := Parse(specifier) + if err != nil { + panic("platform: Parse(" + strconv.Quote(specifier) + "): " + err.Error()) + } + return p +} + +// Format returns a string specifier from the provided platform specification. +func Format(platform specs.Platform) string { + if platform.OS == "" { + return "unknown" + } + + return joinNotEmpty(platform.OS, platform.Architecture, platform.Variant) +} + +func joinNotEmpty(s ...string) string { + var ss []string + for _, s := range s { + if s == "" { + continue + } + + ss = append(ss, s) + } + + return strings.Join(ss, "/") +} + +// Normalize validates and translate the platform to the canonical value. +// +// For example, if "Aarch64" is encountered, we change it to "arm64" or if +// "x86_64" is encountered, it becomes "amd64". +func Normalize(platform specs.Platform) specs.Platform { + platform.OS = normalizeOS(platform.OS) + platform.Architecture, platform.Variant = normalizeArch(platform.Architecture, platform.Variant) + + // these fields are deprecated, remove them + platform.OSFeatures = nil + platform.OSVersion = "" + + return platform +} diff --git a/vendor/github.com/containerd/containerd/reference/reference.go b/vendor/github.com/containerd/containerd/reference/reference.go new file mode 100644 index 0000000000..a4bf6da601 --- /dev/null +++ b/vendor/github.com/containerd/containerd/reference/reference.go @@ -0,0 +1,166 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package reference + +import ( + "errors" + "fmt" + "net/url" + "path" + "regexp" + "strings" + + digest "github.com/opencontainers/go-digest" +) + +var ( + // ErrInvalid is returned when there is an invalid reference + ErrInvalid = errors.New("invalid reference") + // ErrObjectRequired is returned when the object is required + ErrObjectRequired = errors.New("object required") + // ErrHostnameRequired is returned when the hostname is required + ErrHostnameRequired = errors.New("hostname required") +) + +// Spec defines the main components of a reference specification. +// +// A reference specification is a schema-less URI parsed into common +// components. The two main components, locator and object, are required to be +// supported by remotes. It represents a superset of the naming define in +// docker's reference schema. It aims to be compatible but not prescriptive. +// +// While the interpretation of the components, locator and object, are up to +// the remote, we define a few common parts, accessible via helper methods. +// +// The first is the hostname, which is part of the locator. This doesn't need +// to map to a physical resource, but it must parse as a hostname. We refer to +// this as the namespace. +// +// The other component made accessible by helper method is the digest. This is +// part of the object identifier, always prefixed with an '@'. If present, the +// remote may use the digest portion directly or resolve it against a prefix. 
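// Editor's sketch (not part of the vendored source): how a typical
// reference decomposes under this Spec, where dgst stands for a valid
// sha256 digest string:
//
//	spec, _ := reference.Parse("docker.io/library/redis:6.0@" + dgst)
//	// spec.Locator    == "docker.io/library/redis"
//	// spec.Object     == "6.0@" + dgst
//	// spec.Hostname() == "docker.io"
//	// spec.Digest()   == digest.Digest(dgst)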
+// If the object does not include the `@` symbol, the return value for `Digest` +// will be empty. +type Spec struct { + // Locator is the host and path portion of the specification. The host + // portion may refer to an actual host or just a namespace of related + // images. + // + // Typically, the locator may used to resolve the remote to fetch specific + // resources. + Locator string + + // Object contains the identifier for the remote resource. Classically, + // this is a tag but can refer to anything in a remote. By convention, any + // portion that may be a partial or whole digest will be preceded by an + // `@`. Anything preceding the `@` will be referred to as the "tag". + // + // In practice, we will see this broken down into the following formats: + // + // 1. + // 2. @ + // 3. @ + // + // We define the tag to be anything except '@' and ':'. may + // be a full valid digest or shortened version, possibly with elided + // algorithm. + Object string +} + +var splitRe = regexp.MustCompile(`[:@]`) + +// Parse parses the string into a structured ref. +func Parse(s string) (Spec, error) { + if strings.Contains(s, "://") { + return Spec{}, ErrInvalid + } + + u, err := url.Parse("dummy://" + s) + if err != nil { + return Spec{}, err + } + + if u.Scheme != "dummy" { + return Spec{}, ErrInvalid + } + + if u.Host == "" { + return Spec{}, ErrHostnameRequired + } + + var object string + + if idx := splitRe.FindStringIndex(u.Path); idx != nil { + // This allows us to retain the @ to signify digests or shortened digests in + // the object. + object = u.Path[idx[0]:] + if object[:1] == ":" { + object = object[1:] + } + u.Path = u.Path[:idx[0]] + } + + return Spec{ + Locator: path.Join(u.Host, u.Path), + Object: object, + }, nil +} + +// Hostname returns the hostname portion of the locator. +// +// Remotes are not required to directly access the resources at this host. This +// method is provided for convenience. +func (r Spec) Hostname() string { + i := strings.Index(r.Locator, "/") + + if i < 0 { + return r.Locator + } + return r.Locator[:i] +} + +// Digest returns the digest portion of the reference spec. This may be a +// partial or invalid digest, which may be used to lookup a complete digest. +func (r Spec) Digest() digest.Digest { + _, dgst := SplitObject(r.Object) + return dgst +} + +// String returns the normalized string for the ref. +func (r Spec) String() string { + if r.Object == "" { + return r.Locator + } + if r.Object[:1] == "@" { + return fmt.Sprintf("%v%v", r.Locator, r.Object) + } + + return fmt.Sprintf("%v:%v", r.Locator, r.Object) +} + +// SplitObject provides two parts of the object spec, delimited by an `@` +// symbol. +// +// Either may be empty and it is the callers job to validate them +// appropriately. +func SplitObject(obj string) (tag string, dgst digest.Digest) { + parts := strings.SplitAfterN(obj, "@", 2) + if len(parts) < 2 { + return parts[0], "" + } + return parts[0], digest.Digest(parts[1]) +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go new file mode 100644 index 0000000000..8b0a87e755 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go @@ -0,0 +1,209 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package auth + +import ( + "context" + "encoding/json" + "net/http" + "net/url" + "strings" + "time" + + "github.com/containerd/containerd/log" + remoteserrors "github.com/containerd/containerd/remotes/errors" + "github.com/containerd/containerd/version" + "github.com/pkg/errors" + "golang.org/x/net/context/ctxhttp" +) + +var ( + // ErrNoToken is returned if a request is successful but the body does not + // contain an authorization token. + ErrNoToken = errors.New("authorization server did not include a token in the response") +) + +// GenerateTokenOptions generates options for fetching a token based on a challenge +func GenerateTokenOptions(ctx context.Context, host, username, secret string, c Challenge) (TokenOptions, error) { + realm, ok := c.Parameters["realm"] + if !ok { + return TokenOptions{}, errors.New("no realm specified for token auth challenge") + } + + realmURL, err := url.Parse(realm) + if err != nil { + return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm") + } + + to := TokenOptions{ + Realm: realmURL.String(), + Service: c.Parameters["service"], + Username: username, + Secret: secret, + } + + scope, ok := c.Parameters["scope"] + if ok { + to.Scopes = append(to.Scopes, scope) + } else { + log.G(ctx).WithField("host", host).Debug("no scope specified for token auth challenge") + } + + return to, nil +} + +// TokenOptions are options for requesting a token +type TokenOptions struct { + Realm string + Service string + Scopes []string + Username string + Secret string +} + +// OAuthTokenResponse is response from fetching token with a OAuth POST request +type OAuthTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +// FetchTokenWithOAuth fetches a token using a POST request +func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) { + form := url.Values{} + if len(to.Scopes) > 0 { + form.Set("scope", strings.Join(to.Scopes, " ")) + } + form.Set("service", to.Service) + form.Set("client_id", clientID) + + if to.Username == "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", to.Secret) + } else { + form.Set("grant_type", "password") + form.Set("username", to.Username) + form.Set("password", to.Secret) + } + + req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode())) + if err != nil { + return nil, err + } + req.Header.Set("Content-Type", "application/x-www-form-urlencoded; charset=utf-8") + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) 
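// Editor's note (sketch, not vendored code): the grant type chosen above
// follows TokenOptions. With Username set this form sends
// grant_type=password; otherwise Secret is treated as a refresh token. A
// hypothetical call, with realm/svc/user/pass as assumed values:
//
//	to := auth.TokenOptions{Realm: realm, Service: svc, Username: user, Secret: pass}
//	resp, err := auth.FetchTokenWithOAuth(ctx, http.DefaultClient, nil, "containerd-client", to)
//	// on success, resp.AccessToken carries the bearer token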
+ } + if len(req.Header.Get("User-Agent")) == 0 { + req.Header.Set("User-Agent", "containerd/"+version.Version) + } + + resp, err := ctxhttp.Do(ctx, client, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) + } + + decoder := json.NewDecoder(resp.Body) + + var tr OAuthTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, errors.Wrap(err, "unable to decode token response") + } + + if tr.AccessToken == "" { + return nil, errors.WithStack(ErrNoToken) + } + + return &tr, nil +} + +// FetchTokenResponse is response from fetching token with GET request +type FetchTokenResponse struct { + Token string `json:"token"` + AccessToken string `json:"access_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + RefreshToken string `json:"refresh_token"` +} + +// FetchToken fetches a token using a GET request +func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) { + req, err := http.NewRequest("GET", to.Realm, nil) + if err != nil { + return nil, err + } + + for k, v := range headers { + req.Header[k] = append(req.Header[k], v...) + } + if len(req.Header.Get("User-Agent")) == 0 { + req.Header.Set("User-Agent", "containerd/"+version.Version) + } + + reqParams := req.URL.Query() + + if to.Service != "" { + reqParams.Add("service", to.Service) + } + + for _, scope := range to.Scopes { + reqParams.Add("scope", scope) + } + + if to.Secret != "" { + req.SetBasicAuth(to.Username, to.Secret) + } + + req.URL.RawQuery = reqParams.Encode() + + resp, err := ctxhttp.Do(ctx, client, req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode < 200 || resp.StatusCode >= 400 { + return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp)) + } + + decoder := json.NewDecoder(resp.Body) + + var tr FetchTokenResponse + if err = decoder.Decode(&tr); err != nil { + return nil, errors.Wrap(err, "unable to decode token response") + } + + // `access_token` is equivalent to `token` and if both are specified + // the choice is undefined. Canonicalize `access_token` by sticking + // things in `token`. + if tr.AccessToken != "" { + tr.Token = tr.AccessToken + } + + if tr.Token == "" { + return nil, errors.WithStack(ErrNoToken) + } + + return &tr, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go new file mode 100644 index 0000000000..223fa2d052 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go @@ -0,0 +1,203 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package auth
+
+import (
+	"net/http"
+	"sort"
+	"strings"
+)
+
+// AuthenticationScheme defines the scheme of the authentication method
+type AuthenticationScheme byte
+
+const (
+	// BasicAuth is the scheme for Basic HTTP Authentication, RFC 7617
+	BasicAuth AuthenticationScheme = 1 << iota
+	// DigestAuth is the scheme for HTTP Digest Access Authentication, RFC 7616
+	DigestAuth
+	// BearerAuth is the scheme for OAuth 2.0 Bearer Tokens, RFC 6750
+	BearerAuth
+)
+
+// Challenge carries information from a WWW-Authenticate response header.
+// See RFC 2617.
+type Challenge struct {
+	// Scheme is the auth-scheme according to RFC 2617
+	Scheme AuthenticationScheme
+
+	// Parameters are the auth-params according to RFC 2617
+	Parameters map[string]string
+}
+
+type byScheme []Challenge
+
+func (bs byScheme) Len() int      { return len(bs) }
+func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
+
+// Sort in priority order: token > digest > basic
+func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme }
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
+		if strings.ContainsRune(" \t\r\n", rune(c)) {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
+
+// ParseAuthHeader parses challenges from a WWW-Authenticate header
+func ParseAuthHeader(header http.Header) []Challenge {
+	challenges := []Challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		var s AuthenticationScheme
+		switch v {
+		case "basic":
+			s = BasicAuth
+		case "digest":
+			s = DigestAuth
+		case "bearer":
+			s = BearerAuth
+		default:
+			continue
+		}
+		challenges = append(challenges, Challenge{Scheme: s, Parameters: p})
+	}
+	sort.Stable(byScheme(challenges))
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	for {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+		if !strings.HasPrefix(s, ",") {
+			return
+		}
+		s = s[1:]
+	}
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case
'\\': + p := make([]byte, len(s)-1) + j := copy(p, s[:i]) + escape := true + for i = i + 1; i < len(s); i++ { + b := s[i] + switch { + case escape: + escape = false + p[j] = b + j++ + case b == '\\': + escape = true + case b == '"': + return string(p[:j]), s[i+1:] + default: + p[j] = b + j++ + } + } + return "", "" + } + } + return "", "" +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go new file mode 100644 index 0000000000..67e4aea8da --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/authorizer.go @@ -0,0 +1,333 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "encoding/base64" + "fmt" + "net/http" + "strings" + "sync" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes/docker/auth" + remoteerrors "github.com/containerd/containerd/remotes/errors" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +type dockerAuthorizer struct { + credentials func(string) (string, string, error) + + client *http.Client + header http.Header + mu sync.Mutex + + // indexed by host name + handlers map[string]*authHandler +} + +// NewAuthorizer creates a Docker authorizer using the provided function to +// get credentials for the token server or basic auth. +// Deprecated: Use NewDockerAuthorizer +func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer { + return NewDockerAuthorizer(WithAuthClient(client), WithAuthCreds(f)) +} + +type authorizerConfig struct { + credentials func(string) (string, string, error) + client *http.Client + header http.Header +} + +// AuthorizerOpt configures an authorizer +type AuthorizerOpt func(*authorizerConfig) + +// WithAuthClient provides the HTTP client for the authorizer +func WithAuthClient(client *http.Client) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.client = client + } +} + +// WithAuthCreds provides a credential function to the authorizer +func WithAuthCreds(creds func(string) (string, string, error)) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.credentials = creds + } +} + +// WithAuthHeader provides HTTP headers for authorization +func WithAuthHeader(hdr http.Header) AuthorizerOpt { + return func(opt *authorizerConfig) { + opt.header = hdr + } +} + +// NewDockerAuthorizer creates an authorizer using Docker's registry +// authentication spec. 
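// Editor's sketch (not vendored code): typical construction with a static
// credential callback, which receives the registry host being contacted:
//
//	authorizer := docker.NewDockerAuthorizer(
//		docker.WithAuthCreds(func(host string) (string, string, error) {
//			return "user", "secret", nil // hypothetical credentials
//		}),
//	)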
+// See https://docs.docker.com/registry/spec/auth/ +func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer { + var ao authorizerConfig + for _, opt := range opts { + opt(&ao) + } + + if ao.client == nil { + ao.client = http.DefaultClient + } + + return &dockerAuthorizer{ + credentials: ao.credentials, + client: ao.client, + header: ao.header, + handlers: make(map[string]*authHandler), + } +} + +// Authorize handles auth request. +func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { + // skip if there is no auth handler + ah := a.getAuthHandler(req.URL.Host) + if ah == nil { + return nil + } + + auth, err := ah.authorize(ctx) + if err != nil { + return err + } + + req.Header.Set("Authorization", auth) + return nil +} + +func (a *dockerAuthorizer) getAuthHandler(host string) *authHandler { + a.mu.Lock() + defer a.mu.Unlock() + + return a.handlers[host] +} + +func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { + last := responses[len(responses)-1] + host := last.Request.URL.Host + + a.mu.Lock() + defer a.mu.Unlock() + for _, c := range auth.ParseAuthHeader(last.Header) { + if c.Scheme == auth.BearerAuth { + if err := invalidAuthorization(c, responses); err != nil { + delete(a.handlers, host) + return err + } + + // reuse existing handler + // + // assume that one registry will return the common + // challenge information, including realm and service. + // and the resource scope is only different part + // which can be provided by each request. + if _, ok := a.handlers[host]; ok { + return nil + } + + var username, secret string + if a.credentials != nil { + var err error + username, secret, err = a.credentials(host) + if err != nil { + return err + } + } + + common, err := auth.GenerateTokenOptions(ctx, host, username, secret, c) + if err != nil { + return err + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) + return nil + } else if c.Scheme == auth.BasicAuth && a.credentials != nil { + username, secret, err := a.credentials(host) + if err != nil { + return err + } + + if username != "" && secret != "" { + common := auth.TokenOptions{ + Username: username, + Secret: secret, + } + + a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common) + return nil + } + } + } + return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme") +} + +// authResult is used to control limit rate. +type authResult struct { + sync.WaitGroup + token string + err error +} + +// authHandler is used to handle auth request per registry server. 
+type authHandler struct { + sync.Mutex + + header http.Header + + client *http.Client + + // only support basic and bearer schemes + scheme auth.AuthenticationScheme + + // common contains common challenge answer + common auth.TokenOptions + + // scopedTokens caches token indexed by scopes, which used in + // bearer auth case + scopedTokens map[string]*authResult +} + +func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.AuthenticationScheme, opts auth.TokenOptions) *authHandler { + return &authHandler{ + header: hdr, + client: client, + scheme: scheme, + common: opts, + scopedTokens: map[string]*authResult{}, + } +} + +func (ah *authHandler) authorize(ctx context.Context) (string, error) { + switch ah.scheme { + case auth.BasicAuth: + return ah.doBasicAuth(ctx) + case auth.BearerAuth: + return ah.doBearerAuth(ctx) + default: + return "", errors.Wrapf(errdefs.ErrNotImplemented, "failed to find supported auth scheme: %s", string(ah.scheme)) + } +} + +func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) { + username, secret := ah.common.Username, ah.common.Secret + + if username == "" || secret == "" { + return "", fmt.Errorf("failed to handle basic auth because missing username or secret") + } + + auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret)) + return fmt.Sprintf("Basic %s", auth), nil +} + +func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err error) { + // copy common tokenOptions + to := ah.common + + to.Scopes = GetTokenScopes(ctx, to.Scopes) + + // Docs: https://docs.docker.com/registry/spec/auth/scope + scoped := strings.Join(to.Scopes, " ") + + ah.Lock() + if r, exist := ah.scopedTokens[scoped]; exist { + ah.Unlock() + r.Wait() + return r.token, r.err + } + + // only one fetch token job + r := new(authResult) + r.Add(1) + ah.scopedTokens[scoped] = r + ah.Unlock() + + defer func() { + token = fmt.Sprintf("Bearer %s", token) + r.token, r.err = token, err + r.Done() + }() + + // fetch token for the resource scope + if to.Secret != "" { + defer func() { + err = errors.Wrap(err, "failed to fetch oauth token") + }() + // credential information is provided, use oauth POST endpoint + // TODO: Allow setting client_id + resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, ah.header, "containerd-client", to) + if err != nil { + var errStatus remoteerrors.ErrUnexpectedStatus + if errors.As(err, &errStatus) { + // Registries without support for POST may return 404 for POST /v2/token. + // As of September 2017, GCR is known to return 404. + // As of February 2018, JFrog Artifactory is known to return 401. 
+ if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { + resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) + if err != nil { + return "", err + } + return resp.Token, nil + } + log.G(ctx).WithFields(logrus.Fields{ + "status": errStatus.Status, + "body": string(errStatus.Body), + }).Debugf("token request failed") + } + return "", err + } + return resp.AccessToken, nil + } + // do request anonymously + resp, err := auth.FetchToken(ctx, ah.client, ah.header, to) + if err != nil { + return "", errors.Wrap(err, "failed to fetch anonymous token") + } + return resp.Token, nil +} + +func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { + errStr := c.Parameters["error"] + if errStr == "" { + return nil + } + + n := len(responses) + if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) { + return nil + } + + return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr) +} + +func sameRequest(r1, r2 *http.Request) bool { + if r1.Method != r2.Method { + return false + } + if *r1.URL != *r2.URL { + return false + } + return true +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/converter.go new file mode 100644 index 0000000000..43e6b372c1 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/converter.go @@ -0,0 +1,88 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// LegacyConfigMediaType should be replaced by OCI image spec. +// +// More detail: docker/distribution#1622 +const LegacyConfigMediaType = "application/octet-stream" + +// ConvertManifest changes application/octet-stream to schema2 config media type if need. +// +// NOTE: +// 1. original manifest will be deleted by next gc round. +// 2. don't cover manifest list. 
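// Editor's note (illustrative only; store and desc are assumed values):
// callers pass the content store holding the pulled manifest and adopt the
// returned descriptor, whose digest changes when the config media type is
// rewritten:
//
//	newDesc, err := docker.ConvertManifest(ctx, store, desc)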
+func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) { + if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest || + desc.MediaType == ocispec.MediaTypeImageManifest) { + + log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType) + return desc, nil + } + + // read manifest data + mb, err := content.ReadBlob(ctx, store, desc) + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to read index data") + } + + var manifest ocispec.Manifest + if err := json.Unmarshal(mb, &manifest); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal data into manifest") + } + + // check config media type + if manifest.Config.MediaType != LegacyConfigMediaType { + return desc, nil + } + + manifest.Config.MediaType = images.MediaTypeDockerSchema2Config + data, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal manifest") + } + + // update manifest with gc labels + desc.Digest = digest.Canonical.FromBytes(data) + desc.Size = int64(len(data)) + + labels := map[string]string{} + for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to update content") + } + return desc, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/errcode.go b/vendor/github.com/containerd/containerd/remotes/docker/errcode.go new file mode 100644 index 0000000000..8c623bcbef --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/errcode.go @@ -0,0 +1,283 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. +type ErrorCode int + +var _ error = ErrorCode(0) + +// ErrorCode just returns itself +func (ec ErrorCode) ErrorCode() ErrorCode { + return ec +} + +// Error returns the ID/Value +func (ec ErrorCode) Error() string { + // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. + return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) +} + +// Descriptor returns the descriptor for the error code. 
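// Editor's sketch (not vendored code): how the pieces compose. An ErrorCode
// renders from its registered descriptor, and WithDetail/WithArgs wrap it in
// an Error:
//
//	err := ErrorCodeUnauthorized.WithDetail("pull access denied")
//	// err.Error() == "unauthorized: authentication required"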
+func (ec ErrorCode) Descriptor() ErrorDescriptor { + d, ok := errorCodeToDescriptors[ec] + + if !ok { + return ErrorCodeUnknown.Descriptor() + } + + return d +} + +// String returns the canonical identifier for this error code. +func (ec ErrorCode) String() string { + return ec.Descriptor().Value +} + +// Message returned the human-readable error message for this error code. +func (ec ErrorCode) Message() string { + return ec.Descriptor().Message +} + +// MarshalText encodes the receiver into UTF-8-encoded text and returns the +// result. +func (ec ErrorCode) MarshalText() (text []byte, err error) { + return []byte(ec.String()), nil +} + +// UnmarshalText decodes the form generated by MarshalText. +func (ec *ErrorCode) UnmarshalText(text []byte) error { + desc, ok := idToDescriptors[string(text)] + + if !ok { + desc = ErrorCodeUnknown.Descriptor() + } + + *ec = desc.Code + + return nil +} + +// WithMessage creates a new Error struct based on the passed-in info and +// overrides the Message property. +func (ec ErrorCode) WithMessage(message string) Error { + return Error{ + Code: ec, + Message: message, + } +} + +// WithDetail creates a new Error struct based on the passed-in info and +// set the Detail property appropriately +func (ec ErrorCode) WithDetail(detail interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithDetail(detail) +} + +// WithArgs creates a new Error struct and sets the Args slice +func (ec ErrorCode) WithArgs(args ...interface{}) Error { + return Error{ + Code: ec, + Message: ec.Message(), + }.WithArgs(args...) +} + +// Error provides a wrapper around ErrorCode with extra Details provided. +type Error struct { + Code ErrorCode `json:"code"` + Message string `json:"message"` + Detail interface{} `json:"detail,omitempty"` + + // TODO(duglin): See if we need an "args" property so we can do the + // variable substitution right before showing the message to the user +} + +var _ error = Error{} + +// ErrorCode returns the ID/Value of this Error +func (e Error) ErrorCode() ErrorCode { + return e.Code +} + +// Error returns a human readable representation of the error. +func (e Error) Error() string { + return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) +} + +// WithDetail will return a new Error, based on the current one, but with +// some Detail info added +func (e Error) WithDetail(detail interface{}) Error { + return Error{ + Code: e.Code, + Message: e.Message, + Detail: detail, + } +} + +// WithArgs uses the passed-in list of interface{} as the substitution +// variables in the Error's Message string, but returns a new Error +func (e Error) WithArgs(args ...interface{}) Error { + return Error{ + Code: e.Code, + Message: fmt.Sprintf(e.Code.Message(), args...), + Detail: e.Detail, + } +} + +// ErrorDescriptor provides relevant information about a given error code. +type ErrorDescriptor struct { + // Code is the error code that this descriptor describes. + Code ErrorCode + + // Value provides a unique, string key, often captilized with + // underscores, to identify the error code. This value is used as the + // keyed value when serializing api errors. + Value string + + // Message is a short, human readable description of the error condition + // included in API responses. + Message string + + // Description provides a complete account of the errors purpose, suitable + // for use in documentation. + Description string + + // HTTPStatusCode provides the http status code that is associated with + // this error condition. 
+ HTTPStatusCode int +} + +// ParseErrorCode returns the value by the string error code. +// `ErrorCodeUnknown` will be returned if the error is not known. +func ParseErrorCode(value string) ErrorCode { + ed, ok := idToDescriptors[value] + if ok { + return ed.Code + } + + return ErrorCodeUnknown +} + +// Errors provides the envelope for multiple errors and a few sugar methods +// for use within the application. +type Errors []error + +var _ error = Errors{} + +func (errs Errors) Error() string { + switch len(errs) { + case 0: + return "" + case 1: + return errs[0].Error() + default: + msg := "errors:\n" + for _, err := range errs { + msg += err.Error() + "\n" + } + return msg + } +} + +// Len returns the current number of errors. +func (errs Errors) Len() int { + return len(errs) +} + +// MarshalJSON converts slice of error, ErrorCode or Error into a +// slice of Error - then serializes +func (errs Errors) MarshalJSON() ([]byte, error) { + var tmpErrs struct { + Errors []Error `json:"errors,omitempty"` + } + + for _, daErr := range errs { + var err Error + + switch daErr := daErr.(type) { + case ErrorCode: + err = daErr.WithDetail(nil) + case Error: + err = daErr + default: + err = ErrorCodeUnknown.WithDetail(daErr) + + } + + // If the Error struct was setup and they forgot to set the + // Message field (meaning its "") then grab it from the ErrCode + msg := err.Message + if msg == "" { + msg = err.Code.Message() + } + + tmpErrs.Errors = append(tmpErrs.Errors, Error{ + Code: err.Code, + Message: msg, + Detail: err.Detail, + }) + } + + return json.Marshal(tmpErrs) +} + +// UnmarshalJSON deserializes []Error and then converts it into slice of +// Error or ErrorCode +func (errs *Errors) UnmarshalJSON(data []byte) error { + var tmpErrs struct { + Errors []Error + } + + if err := json.Unmarshal(data, &tmpErrs); err != nil { + return err + } + + var newErrs Errors + for _, daErr := range tmpErrs.Errors { + // If Message is empty or exactly matches the Code's message string + // then just use the Code, no need for a full Error struct + if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { + // Error's w/o details get converted to ErrorCode + newErrs = append(newErrs, daErr.Code) + } else { + // Error's w/ details are untouched + newErrs = append(newErrs, Error{ + Code: daErr.Code, + Message: daErr.Message, + Detail: daErr.Detail, + }) + } + } + + *errs = newErrs + return nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go b/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go new file mode 100644 index 0000000000..b2bd4d82bd --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/errdesc.go @@ -0,0 +1,154 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go new file mode 100644 index 0000000000..4b2c10e9a3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/fetcher.go @@ -0,0 +1,216 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type dockerFetcher struct { + *dockerBase +} + +func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", desc.Digest)) + + hosts := r.filterHosts(HostCapabilityPull) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts") + } + + ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false) + if err != nil { + return nil, err + } + + return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) { + // firstly try fetch via external urls + for _, us := range desc.URLs { + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", us)) + + u, err := url.Parse(us) + if err != nil { + log.G(ctx).WithError(err).Debug("failed to parse") + continue + } + if u.Scheme != "http" && u.Scheme != "https" { + log.G(ctx).Debug("non-http(s) alternative url is unsupported") + continue + } + log.G(ctx).Debug("trying alternative url") + + // Try this first, parse it + host := RegistryHost{ + Client: http.DefaultClient, + Host: u.Host, + Scheme: u.Scheme, + Path: u.Path, + Capabilities: HostCapabilityPull, + } + req := r.request(host, http.MethodGet) + // Strip namespace from base + req.path = u.Path + if u.RawQuery != "" { + req.path = req.path + "?" + u.RawQuery + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + if errdefs.IsNotFound(err) { + continue // try one of the other urls. 
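// Editor's note: the fallback order implemented by this closure is (1) any
// external URLs carried on the descriptor, then (2) the manifests endpoint
// for manifest media types, then (3) the blobs endpoint, each attempted
// against every configured pull host until one succeeds.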
+ } + + return nil, err + } + + return rc, nil + } + + // Try manifests endpoints for manifests types + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + images.MediaTypeDockerSchema1Manifest, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + + var firstErr error + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "manifests", desc.Digest.String()) + if err := req.addNamespace(r.refspec.Hostname()); err != nil { + return nil, err + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + continue // try another host + } + + return rc, nil + } + + return nil, firstErr + } + + // Finally use blobs endpoints + var firstErr error + for _, host := range r.hosts { + req := r.request(host, http.MethodGet, "blobs", desc.Digest.String()) + if err := req.addNamespace(r.refspec.Hostname()); err != nil { + return nil, err + } + + rc, err := r.open(ctx, req, desc.MediaType, offset) + if err != nil { + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + continue // try another host + } + + return rc, nil + } + + if errdefs.IsNotFound(firstErr) { + firstErr = errors.Wrapf(errdefs.ErrNotFound, + "could not fetch content descriptor %v (%v) from remote", + desc.Digest, desc.MediaType) + } + + return nil, firstErr + + }) +} + +func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string, offset int64) (_ io.ReadCloser, retErr error) { + req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", ")) + + if offset > 0 { + // Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints + // will return the header without supporting the range. The content + // range must always be checked. + req.header.Set("Range", fmt.Sprintf("bytes=%d-", offset)) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } + defer func() { + if retErr != nil { + resp.Body.Close() + } + }() + + if resp.StatusCode > 299 { + // TODO(stevvooe): When doing a offset specific request, we should + // really distinguish between a 206 and a 200. In the case of 200, we + // can discard the bytes, hiding the seek behavior from the + // implementation. + + if resp.StatusCode == http.StatusNotFound { + return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String()) + } + var registryErr Errors + if err := json.NewDecoder(resp.Body).Decode(®istryErr); err != nil || registryErr.Len() < 1 { + return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status) + } + return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error()) + } + if offset > 0 { + cr := resp.Header.Get("content-range") + if cr != "" { + if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) { + return nil, errors.Errorf("unhandled content range in response: %v", cr) + + } + } else { + // TODO: Should any cases where use of content range + // without the proper header be considered? + // 206 responses? 
+ + // Discard up to offset + // Could use buffer pool here but this case should be rare + n, err := io.Copy(ioutil.Discard, io.LimitReader(resp.Body, offset)) + if err != nil { + return nil, errors.Wrap(err, "failed to discard to offset") + } + if n != offset { + return nil, errors.Errorf("unable to discard to offset") + } + + } + } + + return resp.Body, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/handler.go b/vendor/github.com/containerd/containerd/remotes/docker/handler.go new file mode 100644 index 0000000000..529cfbc274 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/handler.go @@ -0,0 +1,154 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "net/url" + "strings" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/labels" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/reference" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // labelDistributionSource describes the source blob comes from. + labelDistributionSource = "containerd.io/distribution.source" +) + +// AppendDistributionSourceLabel updates the label of blob with distribution source. +func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return nil, err + } + + source, repo := u.Hostname(), strings.TrimPrefix(u.Path, "/") + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + info, err := manager.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + + key := distributionSourceLabelKey(source) + + originLabel := "" + if info.Labels != nil { + originLabel = info.Labels[key] + } + value := appendDistributionSourceLabel(originLabel, repo) + + // The repo name has been limited under 256 and the distribution + // label might hit the limitation of label size, when blob data + // is used as the very, very common layer. 
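// Editor's sketch: for a hypothetical ref "docker.io/library/busybox:latest",
// key would be "containerd.io/distribution.source.docker.io" and value a
// comma-separated repository list such as "library/busybox".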
+ if err := labels.Validate(key, value); err != nil { + log.G(ctx).Warnf("skip to append distribution label: %s", err) + return nil, nil + } + + info = content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + key: value, + }, + } + _, err = manager.Update(ctx, info, fmt.Sprintf("labels.%s", key)) + return nil, err + }, nil +} + +func appendDistributionSourceLabel(originLabel, repo string) string { + repos := []string{} + if originLabel != "" { + repos = strings.Split(originLabel, ",") + } + repos = append(repos, repo) + + // use empty string to present duplicate items + for i := 1; i < len(repos); i++ { + tmp, j := repos[i], i-1 + for ; j >= 0 && repos[j] >= tmp; j-- { + if repos[j] == tmp { + tmp = "" + } + repos[j+1] = repos[j] + } + repos[j+1] = tmp + } + + i := 0 + for ; i < len(repos) && repos[i] == ""; i++ { + } + + return strings.Join(repos[i:], ",") +} + +func distributionSourceLabelKey(source string) string { + return fmt.Sprintf("%s.%s", labelDistributionSource, source) +} + +// selectRepositoryMountCandidate will select the repo which has longest +// common prefix components as the candidate. +func selectRepositoryMountCandidate(refspec reference.Spec, sources map[string]string) string { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + // NOTE: basically, it won't be error here + return "" + } + + source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/") + repoLabel, ok := sources[distributionSourceLabelKey(source)] + if !ok || repoLabel == "" { + return "" + } + + n, match := 0, "" + components := strings.Split(target, "/") + for _, repo := range strings.Split(repoLabel, ",") { + // the target repo is not a candidate + if repo == target { + continue + } + + if l := commonPrefixComponents(components, repo); l >= n { + n, match = l, repo + } + } + return match +} + +func commonPrefixComponents(components []string, target string) int { + targetComponents := strings.Split(target, "/") + + i := 0 + for ; i < len(components) && i < len(targetComponents); i++ { + if components[i] != targetComponents[i] { + break + } + } + return i +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go new file mode 100644 index 0000000000..58c866bcde --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go @@ -0,0 +1,169 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "bytes" + "io" + "io/ioutil" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/pkg/errors" +) + +const maxRetry = 3 + +type httpReadSeeker struct { + size int64 + offset int64 + rc io.ReadCloser + open func(offset int64) (io.ReadCloser, error) + closed bool + + errsWithNoProgress int +} + +func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) { + return &httpReadSeeker{ + size: size, + open: open, + }, nil +} + +func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { + if hrs.closed { + return 0, io.EOF + } + + rd, err := hrs.reader() + if err != nil { + return 0, err + } + + n, err = rd.Read(p) + hrs.offset += int64(n) + if n > 0 || err == nil { + hrs.errsWithNoProgress = 0 + } + if err == io.ErrUnexpectedEOF { + // connection closed unexpectedly. try reconnecting. + if n == 0 { + hrs.errsWithNoProgress++ + if hrs.errsWithNoProgress > maxRetry { + return // too many retries for this offset with no progress + } + } + if hrs.rc != nil { + if clsErr := hrs.rc.Close(); clsErr != nil { + log.L.WithError(clsErr).Errorf("httpReadSeeker: failed to close ReadCloser") + } + hrs.rc = nil + } + if _, err2 := hrs.reader(); err2 == nil { + return n, nil + } + } + return +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.closed { + return nil + } + hrs.closed = true + if hrs.rc != nil { + return hrs.rc.Close() + } + + return nil +} + +func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { + if hrs.closed { + return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed") + } + + abs := hrs.offset + switch whence { + case io.SeekStart: + abs = offset + case io.SeekCurrent: + abs += offset + case io.SeekEnd: + if hrs.size == -1 { + return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: unknown size, cannot seek from end") + } + abs = hrs.size + offset + default: + return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence") + } + + if abs < 0 { + return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset") + } + + if abs != hrs.offset { + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser") + } + + hrs.rc = nil + } + + hrs.offset = abs + } + + return hrs.offset, nil +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.rc != nil { + return hrs.rc, nil + } + + if hrs.size == -1 || hrs.offset < hrs.size { + // only try to reopen the body request if we are seeking to a value + // less than the actual size. + if hrs.open == nil { + return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open") + } + + rc, err := hrs.open(hrs.offset) + if err != nil { + return nil, errors.Wrapf(err, "httpReadSeeker: failed open") + } + + if hrs.rc != nil { + if err := hrs.rc.Close(); err != nil { + log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser") + } + } + hrs.rc = rc + } else { + // There is an edge case here where offset == size of the content. If + // we seek, we will probably get an error for content that cannot be + // sought (?). In that case, we should err on committing the content, + // as the length is already satisfied but we just return the empty + // reader instead. 
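// Editor's note: this branch is reached only when the offset has been moved
// to (or past) a known size, so the empty reader makes the next Read return
// io.EOF instead of re-opening the remote request.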
+ + hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{})) + } + + return hrs.rc, nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/pusher.go b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go new file mode 100644 index 0000000000..97ed66a6ab --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/pusher.go @@ -0,0 +1,445 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "io" + "io/ioutil" + "net/http" + "net/url" + "strings" + "time" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + remoteserrors "github.com/containerd/containerd/remotes/errors" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +type dockerPusher struct { + *dockerBase + object string + + // TODO: namespace tracker + tracker StatusTracker +} + +// Writer implements Ingester API of content store. This allows the client +// to receive ErrUnavailable when there is already an on-going upload. +// Note that the tracker MUST implement StatusTrackLocker interface to avoid +// race condition on StatusTracker. +func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) { + var wOpts content.WriterOpts + for _, opt := range opts { + if err := opt(&wOpts); err != nil { + return nil, err + } + } + if wOpts.Ref == "" { + return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") + } + return p.push(ctx, wOpts.Desc, wOpts.Ref, true) +} + +func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return p.push(ctx, desc, remotes.MakeRefKey(ctx, desc), false) +} + +func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref string, unavailableOnFail bool) (content.Writer, error) { + if l, ok := p.tracker.(StatusTrackLocker); ok { + l.Lock(ref) + defer l.Unlock(ref) + } + ctx, err := ContextWithRepositoryScope(ctx, p.refspec, true) + if err != nil { + return nil, err + } + status, err := p.tracker.GetStatus(ref) + if err == nil { + if status.Committed && status.Offset == status.Total { + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref) + } + if unavailableOnFail { + // Another push of this ref is happening elsewhere. The rest of function + // will continue only when `errdefs.IsNotFound(err) == true` (i.e. there + // is no actively-tracked ref already). 
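+ // Writer passes unavailableOnFail=true, so a concurrent Writer call + // for the same ref fails fast here instead of racing the upload.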
+ return nil, errors.Wrap(errdefs.ErrUnavailable, "push is on-going") + } + // TODO: Handle incomplete status + } else if !errdefs.IsNotFound(err) { + return nil, errors.Wrap(err, "failed to get status") + } + + hosts := p.filterHosts(HostCapabilityPush) + if len(hosts) == 0 { + return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts") + } + + var ( + isManifest bool + existCheck []string + host = hosts[0] + ) + + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList, + ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex: + isManifest = true + existCheck = getManifestPath(p.object, desc.Digest) + default: + existCheck = []string{"blobs", desc.Digest.String()} + } + + req := p.request(host, http.MethodHead, existCheck...) + req.header.Set("Accept", strings.Join([]string{desc.MediaType, `*/*`}, ", ")) + + log.G(ctx).WithField("url", req.String()).Debugf("checking and pushing to") + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if !errors.Is(err, ErrInvalidAuthorization) { + return nil, err + } + log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push") + } else { + if resp.StatusCode == http.StatusOK { + var exists bool + if isManifest && existCheck[1] != desc.Digest.String() { + dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + if dgstHeader == desc.Digest { + exists = true + } + } else { + exists = true + } + + if exists { + p.tracker.SetStatus(ref, Status{ + Committed: true, + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Offset: desc.Size, + // TODO: Set updated time? + }, + }) + resp.Body.Close() + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) + } + } else if resp.StatusCode != http.StatusNotFound { + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + resp.Body.Close() + return nil, err + } + resp.Body.Close() + } + + if isManifest { + putPath := getManifestPath(p.object, desc.Digest) + req = p.request(host, http.MethodPut, putPath...) + req.header.Add("Content-Type", desc.MediaType) + } else { + // Start upload request + req = p.request(host, http.MethodPost, "blobs", "uploads/") + + var resp *http.Response + if fromRepo := selectRepositoryMountCandidate(p.refspec, desc.Annotations); fromRepo != "" { + preq := requestWithMountFrom(req, desc.Digest.String(), fromRepo) + pctx := ContextWithAppendPullRepositoryScope(ctx, fromRepo) + + // NOTE: the fromRepo might be private repo and + // auth service still can grant token without error. + // but the post request will fail because of 401. + // + // for the private repo, we should remove mount-from + // query and send the request again. 
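+ // If the mount attempt is rejected below, resp is reset to nil and + // the plain POST upload (without the mount-from query) is issued instead.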
+ resp, err = preq.doWithRetries(pctx, nil) + if err != nil { + return nil, err + } + + if resp.StatusCode == http.StatusUnauthorized { + log.G(ctx).Debugf("failed to mount from repository %s", fromRepo) + + resp.Body.Close() + resp = nil + } + } + + if resp == nil { + resp, err = req.doWithRetries(ctx, nil) + if err != nil { + return nil, err + } + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK, http.StatusAccepted, http.StatusNoContent: + case http.StatusCreated: + p.tracker.SetStatus(ref, Status{ + Committed: true, + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Offset: desc.Size, + }, + }) + return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest) + default: + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + return nil, err + } + + var ( + location = resp.Header.Get("Location") + lurl *url.URL + lhost = host + ) + // Support paths without host in location + if strings.HasPrefix(location, "/") { + lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + } else { + if !strings.Contains(location, "://") { + location = lhost.Scheme + "://" + location + } + lurl, err = url.Parse(location) + if err != nil { + return nil, errors.Wrapf(err, "unable to parse location %v", location) + } + + if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme { + + lhost.Scheme = lurl.Scheme + lhost.Host = lurl.Host + log.G(ctx).WithField("host", lhost.Host).WithField("scheme", lhost.Scheme).Debug("upload changed destination") + + // Strip authorizer if change to host or scheme + lhost.Authorizer = nil + } + } + q := lurl.Query() + q.Add("digest", desc.Digest.String()) + + req = p.request(lhost, http.MethodPut) + req.header.Set("Content-Type", "application/octet-stream") + req.path = lurl.Path + "?" 
+ q.Encode() + } + p.tracker.SetStatus(ref, Status{ + Status: content.Status{ + Ref: ref, + Total: desc.Size, + Expected: desc.Digest, + StartedAt: time.Now(), + }, + }) + + // TODO: Support chunked upload + + pr, pw := io.Pipe() + respC := make(chan response, 1) + body := ioutil.NopCloser(pr) + + req.body = func() (io.ReadCloser, error) { + if body == nil { + return nil, errors.New("cannot reuse body, request must be retried") + } + // Only use the body once since pipe cannot be seeked + ob := body + body = nil + return ob, nil + } + req.size = desc.Size + + go func() { + defer close(respC) + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + respC <- response{err: err} + pr.CloseWithError(err) + return + } + + switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent: + default: + err := remoteserrors.NewUnexpectedStatusErr(resp) + log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response") + pr.CloseWithError(err) + } + respC <- response{Response: resp} + }() + + return &pushWriter{ + base: p.dockerBase, + ref: ref, + pipe: pw, + responseC: respC, + isManifest: isManifest, + expected: desc.Digest, + tracker: p.tracker, + }, nil +} + +func getManifestPath(object string, dgst digest.Digest) []string { + if i := strings.IndexByte(object, '@'); i >= 0 { + if object[i+1:] != dgst.String() { + // use digest, not tag + object = "" + } else { + // strip @ for registry path to make tag + object = object[:i] + } + + } + + if object == "" { + return []string{"manifests", dgst.String()} + } + + return []string{"manifests", object} +} + +type response struct { + *http.Response + err error +} + +type pushWriter struct { + base *dockerBase + ref string + + pipe *io.PipeWriter + responseC <-chan response + isManifest bool + + expected digest.Digest + tracker StatusTracker +} + +func (pw *pushWriter) Write(p []byte) (n int, err error) { + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return n, err + } + n, err = pw.pipe.Write(p) + status.Offset += int64(n) + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + return +} + +func (pw *pushWriter) Close() error { + return pw.pipe.Close() +} + +func (pw *pushWriter) Status() (content.Status, error) { + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return content.Status{}, err + } + return status.Status, nil + +} + +func (pw *pushWriter) Digest() digest.Digest { + // TODO: Get rid of this function? + return pw.expected +} + +func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error { + // Check whether read has already thrown an error + if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe { + return errors.Wrap(err, "pipe error before commit") + } + + if err := pw.pipe.Close(); err != nil { + return err + } + // TODO: timeout waiting for response + resp := <-pw.responseC + if resp.err != nil { + return resp.err + } + defer resp.Response.Body.Close() + + // 201 is specified return status, some registries return + // 200, 202 or 204. 
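+ // Accept any of those status codes as a successful commit and treat + // everything else as an error.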
+ switch resp.StatusCode { + case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted: + default: + return remoteserrors.NewUnexpectedStatusErr(resp.Response) + } + + status, err := pw.tracker.GetStatus(pw.ref) + if err != nil { + return errors.Wrap(err, "failed to get status") + } + + if size > 0 && size != status.Offset { + return errors.Errorf("unexpected size %d, expected %d", status.Offset, size) + } + + if expected == "" { + expected = status.Expected + } + + actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err != nil { + return errors.Wrap(err, "invalid content digest in response") + } + + if actual != expected { + return errors.Errorf("got digest %s, expected %s", actual, expected) + } + + status.Committed = true + status.UpdatedAt = time.Now() + pw.tracker.SetStatus(pw.ref, status) + + return nil +} + +func (pw *pushWriter) Truncate(size int64) error { + // TODO: if blob close request and start new request at offset + // TODO: always error on manifest + return errors.New("cannot truncate remote upload") +} + +func requestWithMountFrom(req *request, mount, from string) *request { + creq := *req + + sep := "?" + if strings.Contains(creq.path, sep) { + sep = "&" + } + + creq.path = creq.path + sep + "mount=" + mount + "&from=" + from + + return &creq +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/registry.go b/vendor/github.com/containerd/containerd/remotes/docker/registry.go new file mode 100644 index 0000000000..1e77d4c86c --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/registry.go @@ -0,0 +1,245 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "net" + "net/http" + + "github.com/pkg/errors" +) + +// HostCapabilities represent the capabilities of the registry +// host. This also represents the set of operations for which +// the registry host may be trusted to perform. +// +// For example pushing is a capability which should only be +// performed on an upstream source, not a mirror. +// Resolving (the process of converting a name into a digest) +// must be considered a trusted operation and only done by +// a host which is trusted (or more preferably by secure process +// which can prove the provenance of the mapping). A public +// mirror should never be trusted to do a resolve action. 
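+// +// The table below summarizes the operations each kind of host is +// typically trusted to perform: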
+// +// | Registry Type | Pull | Resolve | Push | +// |------------------|------|---------|------| +// | Public Registry | yes | yes | yes | +// | Private Registry | yes | yes | yes | +// | Public Mirror | yes | no | no | +// | Private Mirror | yes | yes | no | +type HostCapabilities uint8 + +const ( + // HostCapabilityPull represents the capability to fetch manifests + // and blobs by digest + HostCapabilityPull HostCapabilities = 1 << iota + + // HostCapabilityResolve represents the capability to fetch manifests + // by name + HostCapabilityResolve + + // HostCapabilityPush represents the capability to push blobs and + // manifests + HostCapabilityPush + + // Reserved for future capabilities (e.g. search, catalog, remove) +) + +// Has checks whether the capabilities list has the provided capability +func (c HostCapabilities) Has(t HostCapabilities) bool { + return c&t == t +} + +// RegistryHost represents a complete configuration for a registry +// host, representing the capabilities, authorizations, connection +// configuration, and location. +type RegistryHost struct { + Client *http.Client + Authorizer Authorizer + Host string + Scheme string + Path string + Capabilities HostCapabilities + Header http.Header +} + +func (h RegistryHost) isProxy(refhost string) bool { + if refhost != h.Host { + if refhost != "docker.io" || h.Host != "registry-1.docker.io" { + return true + } + } + return false +} + +// RegistryHosts fetches the registry hosts for a given namespace, +// provided by the host component of a distribution image reference. +type RegistryHosts func(string) ([]RegistryHost, error) + +// Registries joins multiple registry configuration functions, using the same +// order as provided within the arguments. When an empty registry configuration +// is returned with a nil error, the next function will be called. +// NOTE: This function will not merge configurations; as soon as a non-empty +// configuration is returned from a configuration function, it is returned +// to the caller. +func Registries(registries ...RegistryHosts) RegistryHosts { + return func(host string) ([]RegistryHost, error) { + for _, registry := range registries { + config, err := registry(host) + if err != nil { + return config, err + } + if len(config) > 0 { + return config, nil + } + } + return nil, nil + } +} + +type registryOpts struct { + authorizer Authorizer + plainHTTP func(string) (bool, error) + host func(string) (string, error) + client *http.Client +} + +// RegistryOpt defines a registry default option +type RegistryOpt func(*registryOpts) + +// WithPlainHTTP configures registries to use plaintext http scheme +// for the provided host match function. +func WithPlainHTTP(f func(string) (bool, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.plainHTTP = f + } +} + +// WithAuthorizer configures the default authorizer for a registry +func WithAuthorizer(a Authorizer) RegistryOpt { + return func(opts *registryOpts) { + opts.authorizer = a + } +} + +// WithHostTranslator defines the default translator to use for registry hosts +func WithHostTranslator(h func(string) (string, error)) RegistryOpt { + return func(opts *registryOpts) { + opts.host = h + } +} + +// WithClient configures the default http client for a registry +func WithClient(c *http.Client) RegistryOpt { + return func(opts *registryOpts) { + opts.client = c + } +} + +// ConfigureDefaultRegistries is used to create a default configuration for +// registries.
For more advanced configurations or per-domain setups, +// the RegistryHosts interface should be used directly. +// NOTE: This function will always return a non-empty value or error +func ConfigureDefaultRegistries(ropts ...RegistryOpt) RegistryHosts { + var opts registryOpts + for _, opt := range ropts { + opt(&opts) + } + + return func(host string) ([]RegistryHost, error) { + config := RegistryHost{ + Client: opts.client, + Authorizer: opts.authorizer, + Host: host, + Scheme: "https", + Path: "/v2", + Capabilities: HostCapabilityPull | HostCapabilityResolve | HostCapabilityPush, + } + + if config.Client == nil { + config.Client = http.DefaultClient + } + + if opts.plainHTTP != nil { + match, err := opts.plainHTTP(host) + if err != nil { + return nil, err + } + if match { + config.Scheme = "http" + } + } + + if opts.host != nil { + var err error + config.Host, err = opts.host(config.Host) + if err != nil { + return nil, err + } + } else if host == "docker.io" { + config.Host = "registry-1.docker.io" + } + + return []RegistryHost{config}, nil + } +} + +// MatchAllHosts is a host match function which is always true. +func MatchAllHosts(string) (bool, error) { + return true, nil +} + +// MatchLocalhost is a host match function which returns true for +// localhost. +// +// Note: this does not handle matching of ip addresses in octal, +// decimal or hex form. +func MatchLocalhost(host string) (bool, error) { + switch { + case host == "::1": + return true, nil + case host == "[::1]": + return true, nil + } + h, p, err := net.SplitHostPort(host) + + // addrError helps distinguish between errors of form + // "no colon in address" and "too many colons in address". + // The former is fine as the host string need not have a + // port. Latter needs to be handled. + addrError := &net.AddrError{ + Err: "missing port in address", + Addr: host, + } + if err != nil { + if err.Error() != addrError.Error() { + return false, err + } + // host string without any port specified + h = host + } else if len(p) == 0 { + return false, errors.New("invalid host name format") + } + + // use ipv4 dotted decimal for further checking + if h == "localhost" { + h = "127.0.0.1" + } + ip := net.ParseIP(h) + + return ip.IsLoopback(), nil +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/resolver.go b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go new file mode 100644 index 0000000000..1be9e1d05c --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/resolver.go @@ -0,0 +1,667 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + "path" + "strings" + + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/reference" + "github.com/containerd/containerd/remotes" + "github.com/containerd/containerd/remotes/docker/schema1" + "github.com/containerd/containerd/version" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/net/context/ctxhttp" +) + +var ( + // ErrInvalidAuthorization is used when credentials are passed to a server but + // those credentials are rejected. + ErrInvalidAuthorization = errors.New("authorization failed") + + // MaxManifestSize represents the largest size accepted from a registry + // during resolution. Larger manifests may be accepted using a + // resolution method other than the registry. + // + // NOTE: The max supported layers by some runtimes is 128 and individual + // layers will not contribute more than 256 bytes, making a + // reasonable limit for a large image manifests of 32K bytes. + // 4M bytes represents a much larger upper bound for images which may + // contain large annotations or be non-images. A proper manifest + // design puts large metadata in subobjects, as is consistent the + // intent of the manifest design. + MaxManifestSize int64 = 4 * 1048 * 1048 +) + +// Authorizer is used to authorize HTTP requests based on 401 HTTP responses. +// An Authorizer is responsible for caching tokens or credentials used by +// requests. +type Authorizer interface { + // Authorize sets the appropriate `Authorization` header on the given + // request. + // + // If no authorization is found for the request, the request remains + // unmodified. It may also add an `Authorization` header as + // "bearer " + // "basic " + Authorize(context.Context, *http.Request) error + + // AddResponses adds a 401 response for the authorizer to consider when + // authorizing requests. The last response should be unauthorized and + // the previous requests are used to consider redirects and retries + // that may have led to the 401. + // + // If response is not handled, returns `ErrNotImplemented` + AddResponses(context.Context, []*http.Response) error +} + +// ResolverOptions are used to configured a new Docker register resolver +type ResolverOptions struct { + // Hosts returns registry host configurations for a namespace. + Hosts RegistryHosts + + // Headers are the HTTP request header fields sent by the resolver + Headers http.Header + + // Tracker is used to track uploads to the registry. This is used + // since the registry does not have upload tracking and the existing + // mechanism for getting blob upload status is expensive. + Tracker StatusTracker + + // Authorizer is used to authorize registry requests + // Deprecated: use Hosts + Authorizer Authorizer + + // Credentials provides username and secret given a host. + // If username is empty but a secret is given, that secret + // is interpreted as a long lived token. + // Deprecated: use Hosts + Credentials func(string) (string, string, error) + + // Host provides the hostname given a namespace. 
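+ // (see DefaultHost below, which maps "docker.io" to "registry-1.docker.io").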
+ // Deprecated: use Hosts + Host func(string) (string, error) + + // PlainHTTP specifies to use plain http and not https + // Deprecated: use Hosts + PlainHTTP bool + + // Client is the http client to used when making registry requests + // Deprecated: use Hosts + Client *http.Client +} + +// DefaultHost is the default host function. +func DefaultHost(ns string) (string, error) { + if ns == "docker.io" { + return "registry-1.docker.io", nil + } + return ns, nil +} + +type dockerResolver struct { + hosts RegistryHosts + header http.Header + resolveHeader http.Header + tracker StatusTracker +} + +// NewResolver returns a new resolver to a Docker registry +func NewResolver(options ResolverOptions) remotes.Resolver { + if options.Tracker == nil { + options.Tracker = NewInMemoryTracker() + } + + if options.Headers == nil { + options.Headers = make(http.Header) + } + if _, ok := options.Headers["User-Agent"]; !ok { + options.Headers.Set("User-Agent", "containerd/"+version.Version) + } + + resolveHeader := http.Header{} + if _, ok := options.Headers["Accept"]; !ok { + // set headers for all the types we support for resolution. + resolveHeader.Set("Accept", strings.Join([]string{ + images.MediaTypeDockerSchema2Manifest, + images.MediaTypeDockerSchema2ManifestList, + ocispec.MediaTypeImageManifest, + ocispec.MediaTypeImageIndex, "*/*"}, ", ")) + } else { + resolveHeader["Accept"] = options.Headers["Accept"] + delete(options.Headers, "Accept") + } + + if options.Hosts == nil { + opts := []RegistryOpt{} + if options.Host != nil { + opts = append(opts, WithHostTranslator(options.Host)) + } + + if options.Authorizer == nil { + options.Authorizer = NewDockerAuthorizer( + WithAuthClient(options.Client), + WithAuthHeader(options.Headers), + WithAuthCreds(options.Credentials)) + } + opts = append(opts, WithAuthorizer(options.Authorizer)) + + if options.Client != nil { + opts = append(opts, WithClient(options.Client)) + } + if options.PlainHTTP { + opts = append(opts, WithPlainHTTP(MatchAllHosts)) + } else { + opts = append(opts, WithPlainHTTP(MatchLocalhost)) + } + options.Hosts = ConfigureDefaultRegistries(opts...) + } + return &dockerResolver{ + hosts: options.Hosts, + header: options.Headers, + resolveHeader: resolveHeader, + tracker: options.Tracker, + } +} + +func getManifestMediaType(resp *http.Response) string { + // Strip encoding data (manifests should always be ascii JSON) + contentType := resp.Header.Get("Content-Type") + if sp := strings.IndexByte(contentType, ';'); sp != -1 { + contentType = contentType[0:sp] + } + + // As of Apr 30 2019 the registry.access.redhat.com registry does not specify + // the content type of any data but uses schema1 manifests. 
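+ // Treat such text/plain responses as schema 1 manifests so they can + // still be resolved.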
+ if contentType == "text/plain" { + contentType = images.MediaTypeDockerSchema1Manifest + } + return contentType +} + +type countingReader struct { + reader io.Reader + bytesRead int64 +} + +func (r *countingReader) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + r.bytesRead += int64(n) + return n, err +} + +var _ remotes.Resolver = &dockerResolver{} + +func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return "", ocispec.Descriptor{}, err + } + refspec := base.refspec + if refspec.Object == "" { + return "", ocispec.Descriptor{}, reference.ErrObjectRequired + } + + var ( + firstErr error + paths [][]string + dgst = refspec.Digest() + caps = HostCapabilityPull + ) + + if dgst != "" { + if err := dgst.Validate(); err != nil { + // need to fail here, since we can't actually resolve the invalid + // digest. + return "", ocispec.Descriptor{}, err + } + + // turns out, we have a valid digest, make a url. + paths = append(paths, []string{"manifests", dgst.String()}) + + // fallback to blobs on not found. + paths = append(paths, []string{"blobs", dgst.String()}) + } else { + // Add + paths = append(paths, []string{"manifests", refspec.Object}) + caps |= HostCapabilityResolve + } + + hosts := base.filterHosts(caps) + if len(hosts) == 0 { + return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts") + } + + ctx, err = ContextWithRepositoryScope(ctx, refspec, false) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + for _, u := range paths { + for _, host := range hosts { + ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host)) + + req := base.request(host, http.MethodHead, u...) + if err := req.addNamespace(base.refspec.Hostname()); err != nil { + return "", ocispec.Descriptor{}, err + } + + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } + + log.G(ctx).Debug("resolving") + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + if errors.Is(err, ErrInvalidAuthorization) { + err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization") + } + // Store the error for referencing later + if firstErr == nil { + firstErr = err + } + log.G(ctx).WithError(err).Info("trying next host") + continue // try another host + } + resp.Body.Close() // don't care about body contents. + + if resp.StatusCode > 299 { + if resp.StatusCode == http.StatusNotFound { + log.G(ctx).Info("trying next host - response was http.StatusNotFound") + continue + } + if resp.StatusCode > 399 { + // Set firstErr when encountering the first non-404 status code. + if firstErr == nil { + firstErr = errors.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status) + } + continue // try another host + } + return "", ocispec.Descriptor{}, errors.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status) + } + size := resp.ContentLength + contentType := getManifestMediaType(resp) + + // if no digest was provided, then only a resolve + // trusted registry was contacted, in this case use + // the digest header (or content from GET) + if dgst == "" { + // this is the only point at which we trust the registry. we use the + // content headers to assemble a descriptor for the name. when this becomes + // more robust, we mostly get this information from a secure trust store. 
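+ // Only trust Docker-Content-Digest when the registry also reports a + // content length; otherwise fall through to the GET below and digest + // the manifest body directly.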
+ dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest")) + + if dgstHeader != "" && size != -1 { + if err := dgstHeader.Validate(); err != nil { + return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader) + } + dgst = dgstHeader + } + } + if dgst == "" || size == -1 { + log.G(ctx).Debug("no Docker-Content-Digest header, fetching manifest instead") + + req = base.request(host, http.MethodGet, u...) + if err := req.addNamespace(base.refspec.Hostname()); err != nil { + return "", ocispec.Descriptor{}, err + } + + for key, value := range r.resolveHeader { + req.header[key] = append(req.header[key], value...) + } + + resp, err := req.doWithRetries(ctx, nil) + if err != nil { + return "", ocispec.Descriptor{}, err + } + defer resp.Body.Close() + + bodyReader := countingReader{reader: resp.Body} + + contentType = getManifestMediaType(resp) + if dgst == "" { + if contentType == images.MediaTypeDockerSchema1Manifest { + b, err := schema1.ReadStripSignature(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + + dgst = digest.FromBytes(b) + } else { + dgst, err = digest.FromReader(&bodyReader) + if err != nil { + return "", ocispec.Descriptor{}, err + } + } + } else if _, err := io.Copy(ioutil.Discard, &bodyReader); err != nil { + return "", ocispec.Descriptor{}, err + } + size = bodyReader.bytesRead + } + // Prevent resolving to excessively large manifests + if size > MaxManifestSize { + if firstErr == nil { + firstErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref) + } + continue + } + + desc := ocispec.Descriptor{ + Digest: dgst, + MediaType: contentType, + Size: size, + } + + log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved") + return ref, desc, nil + } + } + + // If above loop terminates without return, then there was an error. + // "firstErr" contains the first non-404 error. That is, "firstErr == nil" + // means that either no registries were given or each registry returned 404. 
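+ // Fall back to a plain not-found error for the reference in that case.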
+ + if firstErr == nil { + firstErr = errors.Wrap(errdefs.ErrNotFound, ref) + } + + return "", ocispec.Descriptor{}, firstErr +} + +func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return nil, err + } + + return dockerFetcher{ + dockerBase: base, + }, nil +} + +func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) { + base, err := r.resolveDockerBase(ref) + if err != nil { + return nil, err + } + + return dockerPusher{ + dockerBase: base, + object: base.refspec.Object, + tracker: r.tracker, + }, nil +} + +func (r *dockerResolver) resolveDockerBase(ref string) (*dockerBase, error) { + refspec, err := reference.Parse(ref) + if err != nil { + return nil, err + } + + return r.base(refspec) +} + +type dockerBase struct { + refspec reference.Spec + repository string + hosts []RegistryHost + header http.Header +} + +func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) { + host := refspec.Hostname() + hosts, err := r.hosts(host) + if err != nil { + return nil, err + } + return &dockerBase{ + refspec: refspec, + repository: strings.TrimPrefix(refspec.Locator, host+"/"), + hosts: hosts, + header: r.header, + }, nil +} + +func (r *dockerBase) filterHosts(caps HostCapabilities) (hosts []RegistryHost) { + for _, host := range r.hosts { + if host.Capabilities.Has(caps) { + hosts = append(hosts, host) + } + } + return +} + +func (r *dockerBase) request(host RegistryHost, method string, ps ...string) *request { + header := r.header.Clone() + if header == nil { + header = http.Header{} + } + + for key, value := range host.Header { + header[key] = append(header[key], value...) + } + parts := append([]string{"/", host.Path, r.repository}, ps...) + p := path.Join(parts...) + // Join strips trailing slash, re-add ending "/" if included + if len(parts) > 0 && strings.HasSuffix(parts[len(parts)-1], "/") { + p = p + "/" + } + return &request{ + method: method, + path: p, + header: header, + host: host, + } +} + +func (r *request) authorize(ctx context.Context, req *http.Request) error { + // Check if has header for host + if r.host.Authorizer != nil { + if err := r.host.Authorizer.Authorize(ctx, req); err != nil { + return err + } + } + + return nil +} + +func (r *request) addNamespace(ns string) (err error) { + if !r.host.isProxy(ns) { + return nil + } + var q url.Values + // Parse query + if i := strings.IndexByte(r.path, '?'); i > 0 { + r.path = r.path[:i+1] + q, err = url.ParseQuery(r.path[i+1:]) + if err != nil { + return + } + } else { + r.path = r.path + "?" 
+ q = url.Values{} + } + q.Add("ns", ns) + + r.path = r.path + q.Encode() + + return +} + +type request struct { + method string + path string + header http.Header + host RegistryHost + body func() (io.ReadCloser, error) + size int64 +} + +func (r *request) do(ctx context.Context) (*http.Response, error) { + u := r.host.Scheme + "://" + r.host.Host + r.path + req, err := http.NewRequest(r.method, u, nil) + if err != nil { + return nil, err + } + req.Header = http.Header{} // headers need to be copied to avoid concurrent map access + for k, v := range r.header { + req.Header[k] = v + } + if r.body != nil { + body, err := r.body() + if err != nil { + return nil, err + } + req.Body = body + req.GetBody = r.body + if r.size > 0 { + req.ContentLength = r.size + } + } + + ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u)) + log.G(ctx).WithFields(requestFields(req)).Debug("do request") + if err := r.authorize(ctx, req); err != nil { + return nil, errors.Wrap(err, "failed to authorize") + } + + var client = &http.Client{} + if r.host.Client != nil { + *client = *r.host.Client + } + if client.CheckRedirect == nil { + client.CheckRedirect = func(req *http.Request, via []*http.Request) error { + if len(via) >= 10 { + return errors.New("stopped after 10 redirects") + } + return errors.Wrap(r.authorize(ctx, req), "failed to authorize redirect") + } + } + + resp, err := ctxhttp.Do(ctx, client, req) + if err != nil { + return nil, errors.Wrap(err, "failed to do request") + } + log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received") + return resp, nil +} + +func (r *request) doWithRetries(ctx context.Context, responses []*http.Response) (*http.Response, error) { + resp, err := r.do(ctx) + if err != nil { + return nil, err + } + + responses = append(responses, resp) + retry, err := r.retryRequest(ctx, responses) + if err != nil { + resp.Body.Close() + return nil, err + } + if retry { + resp.Body.Close() + return r.doWithRetries(ctx, responses) + } + return resp, err +} + +func (r *request) retryRequest(ctx context.Context, responses []*http.Response) (bool, error) { + if len(responses) > 5 { + return false, nil + } + last := responses[len(responses)-1] + switch last.StatusCode { + case http.StatusUnauthorized: + log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized") + if r.host.Authorizer != nil { + if err := r.host.Authorizer.AddResponses(ctx, responses); err == nil { + return true, nil + } else if !errdefs.IsNotImplemented(err) { + return false, err + } + } + + return false, nil + case http.StatusMethodNotAllowed: + // Support registries which have not properly implemented the HEAD method for + // manifests endpoint + if r.method == http.MethodHead && strings.Contains(r.path, "/manifests/") { + r.method = http.MethodGet + return true, nil + } + case http.StatusRequestTimeout, http.StatusTooManyRequests: + return true, nil + } + + // TODO: Handle 50x errors accounting for attempt history + return false, nil +} + +func (r *request) String() string { + return r.host.Scheme + "://" + r.host.Host + r.path +} + +func requestFields(req *http.Request) logrus.Fields { + fields := map[string]interface{}{ + "request.method": req.Method, + } + for k, vals := range req.Header { + k = strings.ToLower(k) + if k == "authorization" { + continue + } + for i, v := range vals { + field := "request.header." 
+ k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return logrus.Fields(fields) +} + +func responseFields(resp *http.Response) logrus.Fields { + fields := map[string]interface{}{ + "response.status": resp.Status, + } + for k, vals := range resp.Header { + k = strings.ToLower(k) + for i, v := range vals { + field := "response.header." + k + if i > 0 { + field = fmt.Sprintf("%s.%d", field, i) + } + fields[field] = v + } + } + + return logrus.Fields(fields) +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go new file mode 100644 index 0000000000..f15a9acf3e --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go @@ -0,0 +1,606 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package schema1 + +import ( + "bytes" + "context" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "strconv" + "strings" + "sync" + "time" + + "golang.org/x/sync/errgroup" + + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/remotes" + digest "github.com/opencontainers/go-digest" + specs "github.com/opencontainers/image-spec/specs-go" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +const ( + manifestSizeLimit = 8e6 // 8MB + labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer" +) + +type blobState struct { + diffID digest.Digest + empty bool +} + +// Converter converts schema1 manifests to schema2 on fetch +type Converter struct { + contentStore content.Store + fetcher remotes.Fetcher + + pulledManifest *manifest + + mu sync.Mutex + blobMap map[digest.Digest]blobState + layerBlobs map[digest.Digest]ocispec.Descriptor +} + +// NewConverter returns a new converter +func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter { + return &Converter{ + contentStore: contentStore, + fetcher: fetcher, + blobMap: map[digest.Digest]blobState{}, + layerBlobs: map[digest.Digest]ocispec.Descriptor{}, + } +} + +// Handle fetching descriptors for a docker media type +func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema1Manifest: + if err := c.fetchManifest(ctx, desc); err != nil { + return nil, err + } + + m := c.pulledManifest + if len(m.FSLayers) != len(m.History) { + return nil, errors.New("invalid schema 1 manifest, history and layer mismatch") + } + descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers)) + + for i := range m.FSLayers { + if _, ok := 
c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok { + empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility)) + if err != nil { + return nil, err + } + + // Do no attempt to download a known empty blob + if !empty { + descs = append([]ocispec.Descriptor{ + { + MediaType: images.MediaTypeDockerSchema2LayerGzip, + Digest: c.pulledManifest.FSLayers[i].BlobSum, + Size: -1, + }, + }, descs...) + } + c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{ + empty: empty, + } + } + } + return descs, nil + case images.MediaTypeDockerSchema2LayerGzip: + if c.pulledManifest == nil { + return nil, errors.New("manifest required for schema 1 blob pull") + } + return nil, c.fetchBlob(ctx, desc) + default: + return nil, fmt.Errorf("%v not support for schema 1 manifests", desc.MediaType) + } +} + +// ConvertOptions provides options on converting a docker schema1 manifest. +type ConvertOptions struct { + // ManifestMediaType specifies the media type of the manifest OCI descriptor. + ManifestMediaType string + + // ConfigMediaType specifies the media type of the manifest config OCI + // descriptor. + ConfigMediaType string +} + +// ConvertOpt allows configuring a convert operation. +type ConvertOpt func(context.Context, *ConvertOptions) error + +// UseDockerSchema2 is used to indicate that a schema1 manifest should be +// converted into the media types for a docker schema2 manifest. +func UseDockerSchema2() ConvertOpt { + return func(ctx context.Context, o *ConvertOptions) error { + o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest + o.ConfigMediaType = images.MediaTypeDockerSchema2Config + return nil + } +} + +// Convert a docker manifest to an OCI descriptor +func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) { + co := ConvertOptions{ + ManifestMediaType: ocispec.MediaTypeImageManifest, + ConfigMediaType: ocispec.MediaTypeImageConfig, + } + for _, opt := range opts { + if err := opt(ctx, &co); err != nil { + return ocispec.Descriptor{}, err + } + } + + history, diffIDs, err := c.schema1ManifestHistory() + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "schema 1 conversion failed") + } + + var img ocispec.Image + if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal image from schema 1 history") + } + + img.History = history + img.RootFS = ocispec.RootFS{ + Type: "layers", + DiffIDs: diffIDs, + } + + b, err := json.MarshalIndent(img, "", " ") + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image") + } + + config := ocispec.Descriptor{ + MediaType: co.ConfigMediaType, + Digest: digest.Canonical.FromBytes(b), + Size: int64(len(b)), + } + + layers := make([]ocispec.Descriptor, len(diffIDs)) + for i, diffID := range diffIDs { + layers[i] = c.layerBlobs[diffID] + } + + manifest := ocispec.Manifest{ + Versioned: specs.Versioned{ + SchemaVersion: 2, + }, + Config: config, + Layers: layers, + } + + mb, err := json.MarshalIndent(manifest, "", " ") + if err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image") + } + + desc := ocispec.Descriptor{ + MediaType: co.ManifestMediaType, + Digest: digest.Canonical.FromBytes(mb), + Size: int64(len(mb)), + } + + labels := map[string]string{} + labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String() + for i, ch := range manifest.Layers { + labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", 
i+1)] = ch.Digest.String() + } + + ref := remotes.MakeRefKey(ctx, desc) + if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest") + } + + ref = remotes.MakeRefKey(ctx, config) + if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil { + return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config") + } + + return desc, nil +} + +// ReadStripSignature reads in a schema1 manifest and returns a byte array +// with the "signatures" field stripped +func ReadStripSignature(schema1Blob io.Reader) ([]byte, error) { + b, err := ioutil.ReadAll(io.LimitReader(schema1Blob, manifestSizeLimit)) // limit to 8MB + if err != nil { + return nil, err + } + + return stripSignature(b) +} + +func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch schema 1") + + rc, err := c.fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + + b, err := ReadStripSignature(rc) + rc.Close() + if err != nil { + return err + } + + var m manifest + if err := json.Unmarshal(b, &m); err != nil { + return err + } + if len(m.Manifests) != 0 || len(m.Layers) != 0 { + return errors.New("converter: expected schema1 document but found extra keys") + } + c.pulledManifest = &m + + return nil +} + +func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch blob") + + var ( + ref = remotes.MakeRefKey(ctx, desc) + calc = newBlobStateCalculator() + compressMethod = compression.Gzip + ) + + // size may be unknown, set to zero for content ingest + ingestDesc := desc + if ingestDesc.Size == -1 { + ingestDesc.Size = 0 + } + + cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc)) + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + reuse, err := c.reuseLabelBlobState(ctx, desc) + if err != nil { + return err + } + + if reuse { + return nil + } + + ra, err := c.contentStore.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + r, err := compression.DecompressStream(content.NewReader(ra)) + if err != nil { + return err + } + + compressMethod = r.GetCompression() + _, err = io.Copy(calc, r) + r.Close() + if err != nil { + return err + } + } else { + defer cw.Close() + + rc, err := c.fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + eg, _ := errgroup.WithContext(ctx) + pr, pw := io.Pipe() + + eg.Go(func() error { + r, err := compression.DecompressStream(pr) + if err != nil { + return err + } + + compressMethod = r.GetCompression() + _, err = io.Copy(calc, r) + r.Close() + pr.CloseWithError(err) + return err + }) + + eg.Go(func() error { + defer pw.Close() + + return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest) + }) + + if err := eg.Wait(); err != nil { + return err + } + } + + if desc.Size == -1 { + info, err := c.contentStore.Info(ctx, desc.Digest) + if err != nil { + return errors.Wrap(err, "failed to get blob info") + } + desc.Size = info.Size + } + + if compressMethod == compression.Uncompressed { + log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob") + desc.MediaType = images.MediaTypeDockerSchema2Layer + } + + state := calc.State() + + cinfo := content.Info{ + Digest: desc.Digest, + Labels: map[string]string{ + 
"containerd.io/uncompressed": state.diffID.String(), + labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty), + }, + } + + if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil { + return errors.Wrap(err, "failed to update uncompressed label") + } + + c.mu.Lock() + c.blobMap[desc.Digest] = state + c.layerBlobs[state.diffID] = desc + c.mu.Unlock() + + return nil +} + +func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) { + cinfo, err := c.contentStore.Info(ctx, desc.Digest) + if err != nil { + return false, errors.Wrap(err, "failed to get blob info") + } + desc.Size = cinfo.Size + + diffID, ok := cinfo.Labels["containerd.io/uncompressed"] + if !ok { + return false, nil + } + + emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer] + if !ok { + return false, nil + } + + isEmpty, err := strconv.ParseBool(emptyVal) + if err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty) + return false, nil + } + + bState := blobState{empty: isEmpty} + + if bState.diffID, err = digest.Parse(diffID); err != nil { + log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID) + return false, nil + } + + // NOTE: there is no need to read header to get compression method + // because there are only two kinds of methods. + if bState.diffID == desc.Digest { + desc.MediaType = images.MediaTypeDockerSchema2Layer + } else { + desc.MediaType = images.MediaTypeDockerSchema2LayerGzip + } + + c.mu.Lock() + c.blobMap[desc.Digest] = bState + c.layerBlobs[bState.diffID] = desc + c.mu.Unlock() + return true, nil +} + +func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) { + if c.pulledManifest == nil { + return nil, nil, errors.New("missing schema 1 manifest for conversion") + } + m := *c.pulledManifest + + if len(m.History) == 0 { + return nil, nil, errors.New("no history") + } + + history := make([]ocispec.History, len(m.History)) + diffIDs := []digest.Digest{} + for i := range m.History { + var h v1History + if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { + return nil, nil, errors.Wrap(err, "failed to unmarshal history") + } + + blobSum := m.FSLayers[i].BlobSum + + state := c.blobMap[blobSum] + + history[len(history)-i-1] = ocispec.History{ + Author: h.Author, + Comment: h.Comment, + Created: &h.Created, + CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "), + EmptyLayer: state.empty, + } + + if !state.empty { + diffIDs = append([]digest.Digest{state.diffID}, diffIDs...) 
+ + } + } + + return history, diffIDs, nil +} + +type fsLayer struct { + BlobSum digest.Digest `json:"blobSum"` +} + +type history struct { + V1Compatibility string `json:"v1Compatibility"` +} + +type manifest struct { + FSLayers []fsLayer `json:"fsLayers"` + History []history `json:"history"` + Layers json.RawMessage `json:"layers,omitempty"` // OCI manifest + Manifests json.RawMessage `json:"manifests,omitempty"` // OCI index +} + +type v1History struct { + Author string `json:"author,omitempty"` + Created time.Time `json:"created"` + Comment string `json:"comment,omitempty"` + ThrowAway *bool `json:"throwaway,omitempty"` + Size *int `json:"Size,omitempty"` // used before ThrowAway field + ContainerConfig struct { + Cmd []string `json:"Cmd,omitempty"` + } `json:"container_config,omitempty"` +} + +// isEmptyLayer returns whether the v1 compatibility history describes an +// empty layer. A return value of true indicates the layer is empty, +// however false does not indicate non-empty. +func isEmptyLayer(compatHistory []byte) (bool, error) { + var h v1History + if err := json.Unmarshal(compatHistory, &h); err != nil { + return false, err + } + + if h.ThrowAway != nil { + return *h.ThrowAway, nil + } + if h.Size != nil { + return *h.Size == 0, nil + } + + // If no `Size` or `throwaway` field is given, then + // it cannot be determined whether the layer is empty + // from the history, return false + return false, nil +} + +type signature struct { + Signatures []jsParsedSignature `json:"signatures"` +} + +type jsParsedSignature struct { + Protected string `json:"protected"` +} + +type protectedBlock struct { + Length int `json:"formatLength"` + Tail string `json:"formatTail"` +} + +// joseBase64UrlDecode decodes the given string using the standard base64 url +// decoder but first adds the appropriate number of trailing '=' characters in +// accordance with the jose specification. 
+// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 +func joseBase64UrlDecode(s string) ([]byte, error) { + switch len(s) % 4 { + case 0: + case 2: + s += "==" + case 3: + s += "=" + default: + return nil, errors.New("illegal base64url string") + } + return base64.URLEncoding.DecodeString(s) +} + +func stripSignature(b []byte) ([]byte, error) { + var sig signature + if err := json.Unmarshal(b, &sig); err != nil { + return nil, err + } + if len(sig.Signatures) == 0 { + return nil, errors.New("no signatures") + } + pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected) + if err != nil { + return nil, errors.Wrapf(err, "could not decode %s", sig.Signatures[0].Protected) + } + + var protected protectedBlock + if err := json.Unmarshal(pb, &protected); err != nil { + return nil, err + } + + if protected.Length > len(b) { + return nil, errors.New("invalid protected length block") + } + + tail, err := joseBase64UrlDecode(protected.Tail) + if err != nil { + return nil, errors.Wrap(err, "invalid tail base 64 value") + } + + return append(b[:protected.Length], tail...), nil +} + +type blobStateCalculator struct { + empty bool + digester digest.Digester +} + +func newBlobStateCalculator() *blobStateCalculator { + return &blobStateCalculator{ + empty: true, + digester: digest.Canonical.Digester(), + } +} + +func (c *blobStateCalculator) Write(p []byte) (int, error) { + if c.empty { + for _, b := range p { + if b != 0x00 { + c.empty = false + break + } + } + } + return c.digester.Hash().Write(p) +} + +func (c *blobStateCalculator) State() blobState { + return blobState{ + empty: c.empty, + diffID: c.digester.Digest(), + } +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/scope.go b/vendor/github.com/containerd/containerd/remotes/docker/scope.go new file mode 100644 index 0000000000..fe57f023d3 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/scope.go @@ -0,0 +1,97 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package docker + +import ( + "context" + "fmt" + "net/url" + "sort" + "strings" + + "github.com/containerd/containerd/reference" +) + +// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull" +// for "host/foo/bar:baz". +// When push is true, both pull and push are added to the scope. +func RepositoryScope(refspec reference.Spec, push bool) (string, error) { + u, err := url.Parse("dummy://" + refspec.Locator) + if err != nil { + return "", err + } + s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull" + if push { + s += ",push" + } + return s, nil +} + +// tokenScopesKey is used for the key for context.WithValue(). +// value: []string (e.g. {"registry:foo/bar:pull"}) +type tokenScopesKey struct{} + +// ContextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value. 
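+// +// An illustrative use (a sketch, not part of this package's API surface) is +// to seed the context before resolving: +// +// ctx, err := ContextWithRepositoryScope(ctx, refspec, false) +// if err != nil { +// return err +// } +// // the authorizer can now request a token scoped to "repository:<repo>:pull"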
+func ContextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) { + s, err := RepositoryScope(refspec, push) + if err != nil { + return nil, err + } + return WithScope(ctx, s), nil +} + +// WithScope appends a custom registry auth scope to the context. +func WithScope(ctx context.Context, scope string) context.Context { + var scopes []string + if v := ctx.Value(tokenScopesKey{}); v != nil { + scopes = v.([]string) + scopes = append(scopes, scope) + } else { + scopes = []string{scope} + } + return context.WithValue(ctx, tokenScopesKey{}, scopes) +} + +// ContextWithAppendPullRepositoryScope is used to append repository pull +// scope into existing scopes indexed by the tokenScopesKey{}. +func ContextWithAppendPullRepositoryScope(ctx context.Context, repo string) context.Context { + return WithScope(ctx, fmt.Sprintf("repository:%s:pull", repo)) +} + +// GetTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and common scopes. +func GetTokenScopes(ctx context.Context, common []string) []string { + var scopes []string + if x := ctx.Value(tokenScopesKey{}); x != nil { + scopes = append(scopes, x.([]string)...) + } + + scopes = append(scopes, common...) + sort.Strings(scopes) + + l := 0 + for idx := 1; idx < len(scopes); idx++ { + // Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/) + // So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal. + if scopes[l] == scopes[idx] { + continue + } + + l++ + scopes[l] = scopes[idx] + } + return scopes[:l+1] +} diff --git a/vendor/github.com/containerd/containerd/remotes/docker/status.go b/vendor/github.com/containerd/containerd/remotes/docker/status.go new file mode 100644 index 0000000000..9751edac7f --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/docker/status.go @@ -0,0 +1,87 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package docker + +import ( + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/moby/locker" + "github.com/pkg/errors" +) + +// Status of a content operation +type Status struct { + content.Status + + Committed bool + + // UploadUUID is used by the Docker registry to reference blob uploads + UploadUUID string +} + +// StatusTracker to track status of operations +type StatusTracker interface { + GetStatus(string) (Status, error) + SetStatus(string, Status) +} + +// StatusTrackLocker to track status of operations with lock +type StatusTrackLocker interface { + StatusTracker + Lock(string) + Unlock(string) +} + +type memoryStatusTracker struct { + statuses map[string]Status + m sync.Mutex + locker *locker.Locker +} + +// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory +func NewInMemoryTracker() StatusTrackLocker { + return &memoryStatusTracker{ + statuses: map[string]Status{}, + locker: locker.New(), + } +} + +func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) { + t.m.Lock() + defer t.m.Unlock() + status, ok := t.statuses[ref] + if !ok { + return Status{}, errors.Wrapf(errdefs.ErrNotFound, "status for ref %v", ref) + } + return status, nil +} + +func (t *memoryStatusTracker) SetStatus(ref string, status Status) { + t.m.Lock() + t.statuses[ref] = status + t.m.Unlock() +} + +func (t *memoryStatusTracker) Lock(ref string) { + t.locker.Lock(ref) +} + +func (t *memoryStatusTracker) Unlock(ref string) { + t.locker.Unlock(ref) +} diff --git a/vendor/github.com/containerd/containerd/remotes/errors/errors.go b/vendor/github.com/containerd/containerd/remotes/errors/errors.go new file mode 100644 index 0000000000..519dbac105 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/errors/errors.go @@ -0,0 +1,56 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package errors + +import ( + "fmt" + "io" + "io/ioutil" + "net/http" +) + +var _ error = ErrUnexpectedStatus{} + +// ErrUnexpectedStatus is returned if a registry API request returned with unexpected HTTP status +type ErrUnexpectedStatus struct { + Status string + StatusCode int + Body []byte + RequestURL, RequestMethod string +} + +func (e ErrUnexpectedStatus) Error() string { + return fmt.Sprintf("unexpected status: %s", e.Status) +} + +// NewUnexpectedStatusErr creates an ErrUnexpectedStatus from HTTP response +func NewUnexpectedStatusErr(resp *http.Response) error { + var b []byte + if resp.Body != nil { + b, _ = ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB + } + err := ErrUnexpectedStatus{ + Body: b, + Status: resp.Status, + StatusCode: resp.StatusCode, + RequestMethod: resp.Request.Method, + } + if resp.Request.URL != nil { + err.RequestURL = resp.Request.URL.String() + } + return err +} diff --git a/vendor/github.com/containerd/containerd/remotes/handlers.go b/vendor/github.com/containerd/containerd/remotes/handlers.go new file mode 100644 index 0000000000..8f79c608e6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/remotes/handlers.go @@ -0,0 +1,331 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package remotes + +import ( + "context" + "fmt" + "io" + "strings" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/platforms" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" +) + +type refKeyPrefix struct{} + +// WithMediaTypeKeyPrefix adds a custom key prefix for a media type which is used when storing +// data in the content store from the FetchHandler. +// +// Used in `MakeRefKey` to determine what the key prefix should be. +func WithMediaTypeKeyPrefix(ctx context.Context, mediaType, prefix string) context.Context { + var values map[string]string + if v := ctx.Value(refKeyPrefix{}); v != nil { + values = v.(map[string]string) + } else { + values = make(map[string]string) + } + + values[mediaType] = prefix + return context.WithValue(ctx, refKeyPrefix{}, values) +} + +// MakeRefKey returns a unique reference for the descriptor. This reference can be +// used to lookup ongoing processes related to the descriptor. This function +// may look to the context to namespace the reference appropriately. 
+func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string { + key := desc.Digest.String() + if desc.Annotations != nil { + if name, ok := desc.Annotations[ocispec.AnnotationRefName]; ok { + key = fmt.Sprintf("%s@%s", name, desc.Digest.String()) + } + } + + if v := ctx.Value(refKeyPrefix{}); v != nil { + values := v.(map[string]string) + if prefix := values[desc.MediaType]; prefix != "" { + return prefix + "-" + key + } + } + + switch mt := desc.MediaType; { + case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest: + return "manifest-" + key + case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex: + return "index-" + key + case images.IsLayerType(mt): + return "layer-" + key + case images.IsKnownConfig(mt): + return "config-" + key + default: + log.G(ctx).Warnf("reference for unknown type: %s", mt) + return "unknown-" + key + } +} + +// FetchHandler returns a handler that will fetch all content into the ingester +// discovered in a call to Dispatch. Use with ChildrenHandler to do a full +// recursive fetch. +func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + switch desc.MediaType { + case images.MediaTypeDockerSchema1Manifest: + return nil, fmt.Errorf("%v not supported", desc.MediaType) + default: + err := fetch(ctx, ingester, fetcher, desc) + return nil, err + } + } +} + +func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("fetch") + + cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + if err != nil { + if errdefs.IsAlreadyExists(err) { + return nil + } + return err + } + defer cw.Close() + + ws, err := cw.Status() + if err != nil { + return err + } + + if desc.Size == 0 { + // most likely a poorly configured registry/web front end which responded with no + // Content-Length header; unable (not to mention useless) to commit a 0-length entry + // into the content store. Error out here otherwise the error sent back is confusing + return errors.Wrapf(errdefs.ErrInvalidArgument, "unable to fetch descriptor (%s) which reports content size of zero", desc.Digest) + } + if ws.Offset == desc.Size { + // If writer is already complete, commit and return + err := cw.Commit(ctx, desc.Size, desc.Digest) + if err != nil && !errdefs.IsAlreadyExists(err) { + return errors.Wrapf(err, "failed commit on ref %q", ws.Ref) + } + return nil + } + + rc, err := fetcher.Fetch(ctx, desc) + if err != nil { + return err + } + defer rc.Close() + + return content.Copy(ctx, cw, rc, desc.Size, desc.Digest) +} + +// PushHandler returns a handler that will push all content from the provider +// using a writer from the pusher. 
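A hedged sketch of the recursive fetch pattern described in the FetchHandler comment above; fetchAll is an invented helper, and it assumes a remotes.Fetcher (typically obtained from a resolver) and a local content.Store:

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// fetchAll ingests desc and everything reachable from it into cs.
// FetchHandler runs before ChildrenHandler so that each blob is present
// locally by the time its children are enumerated.
func fetchAll(ctx context.Context, fetcher remotes.Fetcher, cs content.Store, desc ocispec.Descriptor) error {
	handler := images.Handlers(
		remotes.FetchHandler(cs, fetcher),
		images.ChildrenHandler(cs),
	)
	// A nil limiter leaves dispatch concurrency unbounded.
	return images.Dispatch(ctx, handler, nil, desc)
}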
+func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc { + return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{ + "digest": desc.Digest, + "mediatype": desc.MediaType, + "size": desc.Size, + })) + + err := push(ctx, provider, pusher, desc) + return nil, err + } +} + +func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error { + log.G(ctx).Debug("push") + + var ( + cw content.Writer + err error + ) + if cs, ok := pusher.(content.Ingester); ok { + cw, err = content.OpenWriter(ctx, cs, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc)) + } else { + cw, err = pusher.Push(ctx, desc) + } + if err != nil { + if !errdefs.IsAlreadyExists(err) { + return err + } + + return nil + } + defer cw.Close() + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return err + } + defer ra.Close() + + rd := io.NewSectionReader(ra, 0, desc.Size) + return content.Copy(ctx, cw, rd, desc.Size, desc.Digest) +} + +// PushContent pushes content specified by the descriptor from the provider. +// +// Base handlers can be provided which will be called before any push specific +// handlers. +func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, store content.Store, limiter *semaphore.Weighted, platform platforms.MatchComparer, wrapper func(h images.Handler) images.Handler) error { + + var m sync.Mutex + manifestStack := []ocispec.Descriptor{} + + filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + m.Lock() + manifestStack = append(manifestStack, desc) + m.Unlock() + return nil, images.ErrStopHandler + default: + return nil, nil + } + }) + + pushHandler := PushHandler(pusher, store) + + platformFilterhandler := images.FilterPlatforms(images.ChildrenHandler(store), platform) + + annotateHandler := annotateDistributionSourceHandler(platformFilterhandler, store) + + var handler images.Handler = images.Handlers( + annotateHandler, + filterHandler, + pushHandler, + ) + if wrapper != nil { + handler = wrapper(handler) + } + + if err := images.Dispatch(ctx, handler, limiter, desc); err != nil { + return err + } + + // Iterate in reverse order as seen, parent always uploaded after child + for i := len(manifestStack) - 1; i >= 0; i-- { + _, err := pushHandler(ctx, manifestStack[i]) + if err != nil { + // TODO(estesp): until we have a more complete method for index push, we need to report + // missing dependencies in an index/manifest list by sensing the "400 Bad Request" + // as a marker for this problem + if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex || + manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) && + errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") { + return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry") + } + return err + } + } + + return nil +} + +// FilterManifestByPlatformHandler allows Handler to handle non-target +// platform's manifest and configuration data. 
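A hedged sketch of driving the PushContent helper above end to end; pushAll is an invented wrapper, the nil limiter and wrapper arguments simply opt out of rate limiting and handler wrapping, and platforms.All keeps every platform of an index:

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/platforms"
	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// pushAll pushes desc and everything reachable from it to ref.
func pushAll(ctx context.Context, resolver remotes.Resolver, cs content.Store, ref string, desc ocispec.Descriptor) error {
	pusher, err := resolver.Pusher(ctx, ref)
	if err != nil {
		return err
	}
	return remotes.PushContent(ctx, pusher, desc, cs, nil, platforms.All, nil)
}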
+func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		// no platform information
+		if desc.Platform == nil || m == nil {
+			return children, nil
+		}
+
+		var descs []ocispec.Descriptor
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
+			if m.Match(*desc.Platform) {
+				descs = children
+			} else {
+				for _, child := range children {
+					if child.MediaType == images.MediaTypeDockerSchema2Config ||
+						child.MediaType == ocispec.MediaTypeImageConfig {
+
+						descs = append(descs, child)
+					}
+				}
+			}
+		default:
+			descs = children
+		}
+		return descs, nil
+	}
+}
+
+// annotateDistributionSourceHandler adds the distribution source label to the
+// annotations of config or blob descriptors.
+func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Manager) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+
+		// only add distribution source for the config or blob data descriptor
+		switch desc.MediaType {
+		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
+			images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
+		default:
+			return children, nil
+		}
+
+		for i := range children {
+			child := children[i]
+
+			info, err := manager.Info(ctx, child.Digest)
+			if err != nil {
+				return nil, err
+			}
+
+			for k, v := range info.Labels {
+				if !strings.HasPrefix(k, "containerd.io/distribution.source.") {
+					continue
+				}
+
+				if child.Annotations == nil {
+					child.Annotations = map[string]string{}
+				}
+				child.Annotations[k] = v
+			}
+
+			children[i] = child
+		}
+		return children, nil
+	}
+}
diff --git a/vendor/github.com/containerd/containerd/remotes/resolver.go b/vendor/github.com/containerd/containerd/remotes/resolver.go
new file mode 100644
index 0000000000..624b14f05d
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/remotes/resolver.go
@@ -0,0 +1,82 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package remotes
+
+import (
+	"context"
+	"io"
+
+	"github.com/containerd/containerd/content"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// Resolver provides remotes based on a locator.
+type Resolver interface {
+	// Resolve attempts to resolve the reference into a name and descriptor.
+	//
+	// The argument `ref` should be a scheme-less URI representing the remote.
+	// Structurally, it has a host and path. The "host" can be used to directly
+	// reference a specific host or be matched against a specific handler.
+	//
+	// The returned name should be used to identify the referenced entity.
+	// Depending on the remote namespace, this may be immutable or mutable.
+ // While the name may differ from ref, it should itself be a valid ref. + // + // If the resolution fails, an error will be returned. + Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error) + + // Fetcher returns a new fetcher for the provided reference. + // All content fetched from the returned fetcher will be + // from the namespace referred to by ref. + Fetcher(ctx context.Context, ref string) (Fetcher, error) + + // Pusher returns a new pusher for the provided reference + // The returned Pusher should satisfy content.Ingester and concurrent attempts + // to push the same blob using the Ingester API should result in ErrUnavailable. + Pusher(ctx context.Context, ref string) (Pusher, error) +} + +// Fetcher fetches content +type Fetcher interface { + // Fetch the resource identified by the descriptor. + Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) +} + +// Pusher pushes content +type Pusher interface { + // Push returns a content writer for the given resource identified + // by the descriptor. + Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error) +} + +// FetcherFunc allows package users to implement a Fetcher with just a +// function. +type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) + +// Fetch content +func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { + return fn(ctx, desc) +} + +// PusherFunc allows package users to implement a Pusher with just a +// function. +type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) + +// Push content +func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) { + return fn(ctx, desc) +} diff --git a/vendor/github.com/containerd/containerd/version/version.go b/vendor/github.com/containerd/containerd/version/version.go new file mode 100644 index 0000000000..dda0ee93f6 --- /dev/null +++ b/vendor/github.com/containerd/containerd/version/version.go @@ -0,0 +1,34 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package version + +import "runtime" + +var ( + // Package is filled at linking time + Package = "github.com/containerd/containerd" + + // Version holds the complete version number. Filled in at linking time. + Version = "1.5.9+unknown" + + // Revision is filled with the VCS (e.g. git) revision being used to build + // the program at linking time. + Revision = "" + + // GoVersion is Go tree's version. + GoVersion = runtime.Version() +) diff --git a/vendor/github.com/cyphar/filepath-securejoin/.travis.yml b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml new file mode 100644 index 0000000000..b94ff8cf92 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/.travis.yml @@ -0,0 +1,21 @@ +# Copyright (C) 2017 SUSE LLC. All rights reserved. 
+# Use of this source code is governed by a BSD-style +# license that can be found in the LICENSE file. + +language: go +go: + - 1.13.x + - 1.16.x + - tip +arch: + - AMD64 + - ppc64le +os: + - linux + - osx + +script: + - go test -cover -v ./... + +notifications: + email: false diff --git a/vendor/github.com/cyphar/filepath-securejoin/LICENSE b/vendor/github.com/cyphar/filepath-securejoin/LICENSE new file mode 100644 index 0000000000..bec842f294 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/LICENSE @@ -0,0 +1,28 @@ +Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +Copyright (C) 2017 SUSE LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/cyphar/filepath-securejoin/README.md b/vendor/github.com/cyphar/filepath-securejoin/README.md new file mode 100644 index 0000000000..3624617c89 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/README.md @@ -0,0 +1,79 @@ +## `filepath-securejoin` ## + +[![Build Status](https://travis-ci.org/cyphar/filepath-securejoin.svg?branch=master)](https://travis-ci.org/cyphar/filepath-securejoin) + +An implementation of `SecureJoin`, a [candidate for inclusion in the Go +standard library][go#20126]. The purpose of this function is to be a "secure" +alternative to `filepath.Join`, and in particular it provides certain +guarantees that are not provided by `filepath.Join`. + +> **NOTE**: This code is *only* safe if you are not at risk of other processes +> modifying path components after you've used `SecureJoin`. If it is possible +> for a malicious process to modify path components of the resolved path, then +> you will be vulnerable to some fairly trivial TOCTOU race conditions. [There +> are some Linux kernel patches I'm working on which might allow for a better +> solution.][lwn-obeneath] +> +> In addition, with a slightly modified API it might be possible to use +> `O_PATH` and verify that the opened path is actually the resolved one -- but +> I have not done that yet. 
I might add it in the future as a helper function +> to help users verify the path (we can't just return `/proc/self/fd/` +> because that doesn't always work transparently for all users). + +This is the function prototype: + +```go +func SecureJoin(root, unsafePath string) (string, error) +``` + +This library **guarantees** the following: + +* If no error is set, the resulting string **must** be a child path of + `root` and will not contain any symlink path components (they will all be + expanded). + +* When expanding symlinks, all symlink path components **must** be resolved + relative to the provided root. In particular, this can be considered a + userspace implementation of how `chroot(2)` operates on file paths. Note that + these symlinks will **not** be expanded lexically (`filepath.Clean` is not + called on the input before processing). + +* Non-existent path components are unaffected by `SecureJoin` (similar to + `filepath.EvalSymlinks`'s semantics). + +* The returned path will always be `filepath.Clean`ed and thus not contain any + `..` components. + +A (trivial) implementation of this function on GNU/Linux systems could be done +with the following (note that this requires root privileges and is far more +opaque than the implementation in this library, and also requires that +`readlink` is inside the `root` path): + +```go +package securejoin + +import ( + "os/exec" + "path/filepath" +) + +func SecureJoin(root, unsafePath string) (string, error) { + unsafePath = string(filepath.Separator) + unsafePath + cmd := exec.Command("chroot", root, + "readlink", "--canonicalize-missing", "--no-newline", unsafePath) + output, err := cmd.CombinedOutput() + if err != nil { + return "", err + } + expanded := string(output) + return filepath.Join(root, expanded), nil +} +``` + +[lwn-obeneath]: https://lwn.net/Articles/767547/ +[go#20126]: https://github.com/golang/go/issues/20126 + +### License ### + +The license of this project is the same as Go, which is a BSD 3-clause license +available in the `LICENSE` file. diff --git a/vendor/github.com/cyphar/filepath-securejoin/VERSION b/vendor/github.com/cyphar/filepath-securejoin/VERSION new file mode 100644 index 0000000000..7179039691 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/VERSION @@ -0,0 +1 @@ +0.2.3 diff --git a/vendor/github.com/cyphar/filepath-securejoin/join.go b/vendor/github.com/cyphar/filepath-securejoin/join.go new file mode 100644 index 0000000000..7dd08dbbdf --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/join.go @@ -0,0 +1,115 @@ +// Copyright (C) 2014-2015 Docker Inc & Go Authors. All rights reserved. +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package securejoin is an implementation of the hopefully-soon-to-be-included +// SecureJoin helper that is meant to be part of the "path/filepath" package. +// The purpose of this project is to provide a PoC implementation to make the +// SecureJoin proposal (https://github.com/golang/go/issues/20126) more +// tangible. +package securejoin + +import ( + "bytes" + "errors" + "os" + "path/filepath" + "strings" + "syscall" +) + +// IsNotExist tells you if err is an error that implies that either the path +// accessed does not exist (or path components don't exist). This is +// effectively a more broad version of os.IsNotExist. 
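A minimal usage sketch, with made-up paths, of the guarantee described in the README above: ".." components are clamped at the root rather than escaping it, and non-existent components pass through unchanged:

package main

import (
	"fmt"

	securejoin "github.com/cyphar/filepath-securejoin"
)

func main() {
	// The two ".." components cannot climb above /var/lib/app.
	p, err := securejoin.SecureJoin("/var/lib/app", "../../etc/passwd")
	fmt.Println(p, err) // /var/lib/app/etc/passwd <nil>
}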
+func IsNotExist(err error) bool { + // Check that it's not actually an ENOTDIR, which in some cases is a more + // convoluted case of ENOENT (usually involving weird paths). + return errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) || errors.Is(err, syscall.ENOENT) +} + +// SecureJoinVFS joins the two given path components (similar to Join) except +// that the returned path is guaranteed to be scoped inside the provided root +// path (when evaluated). Any symbolic links in the path are evaluated with the +// given root treated as the root of the filesystem, similar to a chroot. The +// filesystem state is evaluated through the given VFS interface (if nil, the +// standard os.* family of functions are used). +// +// Note that the guarantees provided by this function only apply if the path +// components in the returned string are not modified (in other words are not +// replaced with symlinks on the filesystem) after this function has returned. +// Such a symlink race is necessarily out-of-scope of SecureJoin. +func SecureJoinVFS(root, unsafePath string, vfs VFS) (string, error) { + // Use the os.* VFS implementation if none was specified. + if vfs == nil { + vfs = osVFS{} + } + + var path bytes.Buffer + n := 0 + for unsafePath != "" { + if n > 255 { + return "", &os.PathError{Op: "SecureJoin", Path: root + "/" + unsafePath, Err: syscall.ELOOP} + } + + // Next path component, p. + i := strings.IndexRune(unsafePath, filepath.Separator) + var p string + if i == -1 { + p, unsafePath = unsafePath, "" + } else { + p, unsafePath = unsafePath[:i], unsafePath[i+1:] + } + + // Create a cleaned path, using the lexical semantics of /../a, to + // create a "scoped" path component which can safely be joined to fullP + // for evaluation. At this point, path.String() doesn't contain any + // symlink components. + cleanP := filepath.Clean(string(filepath.Separator) + path.String() + p) + if cleanP == string(filepath.Separator) { + path.Reset() + continue + } + fullP := filepath.Clean(root + cleanP) + + // Figure out whether the path is a symlink. + fi, err := vfs.Lstat(fullP) + if err != nil && !IsNotExist(err) { + return "", err + } + // Treat non-existent path components the same as non-symlinks (we + // can't do any better here). + if IsNotExist(err) || fi.Mode()&os.ModeSymlink == 0 { + path.WriteString(p) + path.WriteRune(filepath.Separator) + continue + } + + // Only increment when we actually dereference a link. + n++ + + // It's a symlink, expand it by prepending it to the yet-unparsed path. + dest, err := vfs.Readlink(fullP) + if err != nil { + return "", err + } + // Absolute symlinks reset any work we've already done. + if filepath.IsAbs(dest) { + path.Reset() + } + unsafePath = dest + string(filepath.Separator) + unsafePath + } + + // We have to clean path.String() here because it may contain '..' + // components that are entirely lexical, but would be misleading otherwise. + // And finally do a final clean to ensure that root is also lexically + // clean. + fullP := filepath.Clean(string(filepath.Separator) + path.String()) + return filepath.Clean(root + fullP), nil +} + +// SecureJoin is a wrapper around SecureJoinVFS that just uses the os.* library +// of functions as the VFS. If in doubt, use this function over SecureJoinVFS. 
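A hedged sketch of SecureJoinVFS with a stub VFS, the mock-testing use case the VFS interface (vfs.go, below) is designed for; fakeVFS, fakeInfo, and the symlink table are invented for illustration:

package main

import (
	"fmt"
	"os"
	"time"

	securejoin "github.com/cyphar/filepath-securejoin"
)

// fakeInfo is a minimal os.FileInfo whose only meaningful bit is whether
// the entry is a symlink.
type fakeInfo struct{ link bool }

func (f fakeInfo) Name() string       { return "" }
func (f fakeInfo) Size() int64        { return 0 }
func (f fakeInfo) ModTime() time.Time { return time.Time{} }
func (f fakeInfo) IsDir() bool        { return false }
func (f fakeInfo) Sys() interface{}   { return nil }
func (f fakeInfo) Mode() os.FileMode {
	if f.link {
		return os.ModeSymlink
	}
	return 0
}

// fakeVFS pretends every path exists; paths listed in links are symlinks.
type fakeVFS struct{ links map[string]string }

func (v fakeVFS) Lstat(name string) (os.FileInfo, error) {
	_, ok := v.links[name]
	return fakeInfo{link: ok}, nil
}

func (v fakeVFS) Readlink(name string) (string, error) {
	if dest, ok := v.links[name]; ok {
		return dest, nil
	}
	return "", os.ErrInvalid
}

func main() {
	vfs := fakeVFS{links: map[string]string{
		// An absolute symlink target is re-rooted under the join root.
		"/srv/root/current": "/versions/v2",
	}}
	p, err := securejoin.SecureJoinVFS("/srv/root", "current/config", vfs)
	fmt.Println(p, err) // /srv/root/versions/v2/config <nil>
}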
+func SecureJoin(root, unsafePath string) (string, error) { + return SecureJoinVFS(root, unsafePath, nil) +} diff --git a/vendor/github.com/cyphar/filepath-securejoin/vfs.go b/vendor/github.com/cyphar/filepath-securejoin/vfs.go new file mode 100644 index 0000000000..a82a5eae11 --- /dev/null +++ b/vendor/github.com/cyphar/filepath-securejoin/vfs.go @@ -0,0 +1,41 @@ +// Copyright (C) 2017 SUSE LLC. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package securejoin + +import "os" + +// In future this should be moved into a separate package, because now there +// are several projects (umoci and go-mtree) that are using this sort of +// interface. + +// VFS is the minimal interface necessary to use SecureJoinVFS. A nil VFS is +// equivalent to using the standard os.* family of functions. This is mainly +// used for the purposes of mock testing, but also can be used to otherwise use +// SecureJoin with VFS-like system. +type VFS interface { + // Lstat returns a FileInfo describing the named file. If the file is a + // symbolic link, the returned FileInfo describes the symbolic link. Lstat + // makes no attempt to follow the link. These semantics are identical to + // os.Lstat. + Lstat(name string) (os.FileInfo, error) + + // Readlink returns the destination of the named symbolic link. These + // semantics are identical to os.Readlink. + Readlink(name string) (string, error) +} + +// osVFS is the "nil" VFS, in that it just passes everything through to the os +// module. +type osVFS struct{} + +// Lstat returns a FileInfo describing the named file. If the file is a +// symbolic link, the returned FileInfo describes the symbolic link. Lstat +// makes no attempt to follow the link. These semantics are identical to +// os.Lstat. +func (o osVFS) Lstat(name string) (os.FileInfo, error) { return os.Lstat(name) } + +// Readlink returns the destination of the named symbolic link. These +// semantics are identical to os.Readlink. +func (o osVFS) Readlink(name string) (string, error) { return os.Readlink(name) } diff --git a/vendor/github.com/docker/cli/AUTHORS b/vendor/github.com/docker/cli/AUTHORS new file mode 100644 index 0000000000..8990f85b56 --- /dev/null +++ b/vendor/github.com/docker/cli/AUTHORS @@ -0,0 +1,771 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `scripts/docs/generate-authors.sh`. + +Aanand Prasad +Aaron L. 
Xu +Aaron Lehmann +Aaron.L.Xu +Abdur Rehman +Abhinandan Prativadi +Abin Shahab +Abreto FU +Ace Tang +Addam Hardy +Adolfo Ochagavía +Adrian Plata +Adrien Duermael +Adrien Folie +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Akhil Mohan +Akihiro Suda +Akim Demaille +Alan Thompson +Albert Callarisa +Albin Kerouanton +Aleksa Sarai +Aleksander Piotrowski +Alessandro Boch +Alex Mavrogiannis +Alex Mayer +Alexander Boyd +Alexander Larsson +Alexander Morozov +Alexander Ryabov +Alexandre González +Alfred Landrum +Alicia Lauerman +Allen Sun +Alvin Deng +Amen Belayneh +Amir Goldstein +Amit Krishnan +Amit Shukla +Amy Lindburg +Anca Iordache +Anda Xu +Andrea Luzzardi +Andreas Köhler +Andrew France +Andrew Hsu +Andrew Macpherson +Andrew McDonnell +Andrew Po +Andrey Petrov +Andrii Berehuliak +André Martins +Andy Goldstein +Andy Rothfusz +Anil Madhavapeddy +Ankush Agarwal +Anne Henmi +Anton Polonskiy +Antonio Murdaca +Antonis Kalipetis +Anusha Ragunathan +Ao Li +Arash Deshmeh +Arko Dasgupta +Arnaud Porterie +Arthur Peka +Ashwini Oruganti +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Bastiaan Bakker +BastianHofmann +Ben Bonnefoy +Ben Creasy +Ben Firshman +Benjamin Boudreau +Benoit Sigoure +Bhumika Bayani +Bill Wang +Bin Liu +Bingshen Wang +Boaz Shuster +Bogdan Anton +Boris Pruessmann +Bradley Cicenas +Brandon Mitchell +Brandon Philips +Brent Salisbury +Bret Fisher +Brian (bex) Exelbierd +Brian Goff +Brian Wieder +Bryan Bess +Bryan Boreham +Bryan Murphy +bryfry +Cameron Spear +Cao Weiwei +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Faragher +Chao Wang +Charles Chan +Charles Law +Charles Smith +Charlie Drage +ChaYoung You +Chen Chuanliang +Chen Hanxiao +Chen Mingjie +Chen Qiu +Chris Gavin +Chris Gibson +Chris McKinnel +Chris Snow +Chris Weyl +Christian Persson +Christian Stefanescu +Christophe Robin +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Jones +Christy Norman +Chun Chen +Clinton Kitson +Coenraad Loubser +Colin Hebert +Collin Guarino +Colm Hally +Comical Derskeal <27731088+derskeal@users.noreply.github.com> +Corey Farrell +Corey Quon +Craig Wilhite +Cristian Staretu +Daehyeok Mun +Dafydd Crosby +Daisuke Ito +dalanlan +Damien Nadé +Dan Cotora +Daniel Artine +Daniel Cassidy +Daniel Dao +Daniel Farrell +Daniel Gasienica +Daniel Goosen +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Nephin +Daniel Norberg +Daniel Watkins +Daniel Zhang +Daniil Nikolenko +Danny Berger +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Dave Goodchild +Dave Henderson +Dave Tucker +David Beitey +David Calavera +David Cramer +David Dooling +David Gageot +David Lechner +David Scott +David Sheets +David Williamson +David Xia +David Young +Deng Guangxing +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Docter +Derek McGowan +Deshi Xiao +Dharmit Shah +Dhawal Yogesh Bhanushali +Dieter Reuter +Dima Stopel +Dimitry Andric +Ding Fei +Diogo Monica +Djordje Lukic +Dmitry Gusev +Dmitry Smirnov +Dmitry V. Krivenok +Dominik Braun +Don Kjer +Dong Chen +Doug Davis +Drew Erny +Ed Costello +Elango Sivanandam +Eli Uriegas +Eli Uriegas +Elias Faxö +Elliot Luo <956941328@qq.com> +Eric Curtin +Eric G. Noriega +Eric Rosenberg +Eric Sage +Eric-Olivier Lamey +Erica Windisch +Erik Hollensbe +Erik St. Martin +Essam A. 
Hassan +Ethan Haynes +Euan Kemp +Eugene Yakubovich +Evan Allrich +Evan Hazlett +Evan Krall +Evelyn Xu +Everett Toews +Fabio Falci +Fabrizio Soppelsa +Felix Hupfeld +Felix Rabe +Filip Jareš +Flavio Crisciani +Florian Klein +Forest Johnson +Foysal Iqbal +François Scala +Fred Lifton +Frederic Hemberger +Frederick F. Kautz IV +Frederik Nordahl Jul Sabroe +Frieder Bluemle +Gabriel Nicolas Avellaneda +Gaetan de Villele +Gang Qiao +Gary Schaetz +Genki Takiuchi +George MacRorie +George Xie +Gianluca Borello +Gildas Cuisinier +Goksu Toprak +Gou Rao +Grant Reaber +Greg Pflaum +Guilhem Lettron +Guillaume J. Charmes +Guillaume Le Floch +gwx296173 +Günther Jungbluth +Hakan Özler +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harold Cooper +Harry Zhang +He Simei +Hector S +Helen Xie +Henning Sprang +Henry N +Hernan Garcia +Hongbin Lu +Hu Keping +Huayi Zhang +Hugo Gabriel Eyherabide +huqun +Huu Nguyen +Hyzhou Zhy +Ian Campbell +Ian Philpot +Ignacio Capurro +Ilya Dmitrichenko +Ilya Khlopotov +Ilya Sotkov +Ioan Eugen Stan +Isabel Jimenez +Ivan Grcic +Ivan Markin +Jacob Atzen +Jacob Tomlinson +Jaivish Kothari +Jake Lambert +Jake Sanders +James Nesbitt +James Turnbull +Jamie Hannaford +Jan Koprowski +Jan Pazdziora +Jan-Jaap Driessen +Jana Radhakrishnan +Jared Hocutt +Jasmine Hegman +Jason Heiss +Jason Plum +Jay Kamat +Jean Rouge +Jean-Christophe Sirot +Jean-Pierre Huynh +Jeff Lindsay +Jeff Nickoloff +Jeff Silberman +Jeremy Chambers +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jesse Adametz +Jessica Frazelle +Jezeniel Zapanta +Jian Zhang +Jie Luo +Jilles Oldenbeuving +Jim Galasyn +Jimmy Leger +Jimmy Song +jimmyxian +Jintao Zhang +Joao Fernandes +Joe Abbey +Joe Doliner +Joe Gordon +Joel Handwell +Joey Geiger +Joffrey F +Johan Euphrosine +Johannes 'fish' Ziemke +John Feminella +John Harris +John Howard +John Laswell +John Maguire +John Mulhausen +John Starks +John Stephens +John Tims +John V. Martinez +John Willis +Jon Johnson +Jonatas Baldin +Jonathan Boulle +Jonathan Lee +Jonathan Lomas +Jonathan McCrohan +Jonh Wendell +Jordan Jennings +Jose J. 
Escobar <53836904+jescobar-docker@users.noreply.github.com> +Joseph Kern +Josh Bodah +Josh Chorlton +Josh Hawn +Josh Horwitz +Josh Soref +Julien Barbier +Julien Kassar +Julien Maitrehenry +Justas Brazauskas +Justin Cormack +Justin Simonelis +Justyn Temme +Jyrki Puttonen +Jérémie Drouet +Jérôme Petazzoni +Jörg Thalheim +Kai Blin +Kai Qiang Wu (Kennan) +Kara Alexandra +Kareem Khazem +Karthik Nayak +Kat Samperi +Kathryn Spiers +Katie McLaughlin +Ke Xu +Kei Ohmura +Keith Hudgins +Ken Cochrane +Ken ICHIKAWA +Kenfe-Mickaël Laventure +Kevin Burke +Kevin Feyrer +Kevin Kern +Kevin Kirsche +Kevin Meredith +Kevin Richardson +Kevin Woblick +khaled souf +Kim Eik +Kir Kolyshkin +Kotaro Yoshimatsu +Krasi Georgiev +Kris-Mikael Krister +Kun Zhang +Kunal Kushwaha +Lachlan Cooper +Lai Jiangshan +Lars Kellogg-Stedman +Laura Frank +Laurent Erignoux +Lee Gaines +Lei Jitang +Lennie +Leo Gallucci +Lewis Daly +Li Yi +Li Yi +Liang-Chi Hsieh +Lifubang +Lihua Tang +Lily Guo +Lin Lu +Linus Heckemann +Liping Xue +Liron Levin +liwenqi +lixiaobing10051267 +Lloyd Dewolf +Lorenzo Fontana +Louis Opter +Luca Favatella +Luca Marturana +Lucas Chan +Luka Hartwig +Lukas Heeren +Lukasz Zajaczkowski +Lydell Manganti +Lénaïc Huard +Ma Shimiao +Mabin +Maciej Kalisz +Madhav Puri +Madhu Venugopal +Madhur Batra +Malte Janduda +Manjunath A Kumatagi +Mansi Nahar +mapk0y +Marc Bihlmaier +Marco Mariani +Marco Vedovati +Marcus Martins +Marianna Tessel +Marius Ileana +Marius Sturm +Mark Oates +Marsh Macy +Martin Mosegaard Amdisen +Mary Anthony +Mason Fish +Mason Malone +Mateusz Major +Mathieu Champlon +Matt Gucci +Matt Robenolt +Matteo Orefice +Matthew Heon +Matthieu Hauglustaine +Mauro Porras P +Max Shytikov +Maxime Petazzoni +Mei ChunTao +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Crosby +Michael Friis +Michael Irwin +Michael Käufl +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Steinert +Michael West +Michal Minář +Michał Czeraszkiewicz +Miguel Angel Alvarez Cabrerizo +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Casas +Mike Danese +Mike Dillon +Mike Goelzer +Mike MacCana +mikelinjie <294893458@qq.com> +Mikhail Vasin +Milind Chawre +Mindaugas Rukas +Miroslav Gula +Misty Stanley-Jones +Mohammad Banikazemi +Mohammed Aaqib Ansari +Mohini Anne Dsouza +Moorthy RS +Morgan Bauer +Morten Hekkvang +Moysés Borges +Mrunal Patel +muicoder +Muthukumar R +Máximo Cuadros +Mårten Cassel +Nace Oroz +Nahum Shalman +Nalin Dahyabhai +Nao YONASHIRO +Nassim 'Nass' Eddequiouaq +Natalie Parker +Nate Brennand +Nathan Hsieh +Nathan LeClaire +Nathan McCauley +Neil Peterson +Nick Adcock +Nico Stapelbroek +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nikhil Chawla +Nikolas Garofil +Nikolay Milovanov +Nir Soffer +Nishant Totla +NIWA Hideyuki +Noah Treuhaft +O.S. 
Tezer +Odin Ugedal +ohmystack +Olle Jonsson +Olli Janatuinen +Oscar Wieman +Otto Kekäläinen +Ovidio Mallo +Pascal Borreli +Patrick Böänziger +Patrick Hemmer +Patrick Lang +Paul +Paul Kehrer +Paul Lietar +Paul Mulders +Paul Weaver +Pavel Pospisil +Paweł Szczekutowicz +Peeyush Gupta +Per Lundberg +Peter Edge +Peter Hsu +Peter Jaffe +Peter Kehl +Peter Nagy +Peter Salvatore +Peter Waller +Phil Estes +Philip Alexander Etling +Philipp Gillé +Philipp Schmied +pidster +pixelistik +Pratik Karki +Prayag Verma +Preston Cowley +Pure White +Qiang Huang +Qinglan Peng +qudongfang +Raghavendra K T +Rahul Zoldyck +Ravi Shekhar Jethani +Ray Tsang +Reficul +Remy Suen +Renaud Gaubert +Ricardo N Feliciano +Rich Moyse +Richard Mathie +Richard Scothern +Rick Wieman +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Robert Wallis +Robin Naundorf +Robin Speekenbrink +Rodolfo Ortiz +Rogelio Canedo +Rohan Verma +Roland Kammerer +Roman Dudin +Rory Hunter +Ross Boucher +Rubens Figueiredo +Rui Cao +Ryan Belgrave +Ryan Detzel +Ryan Stelly +Ryan Wilson-Perkin +Ryan Zhang +Sainath Grandhi +Sakeven Jiang +Sally O'Malley +Sam Neirinck +Samarth Shah +Sambuddha Basu +Sami Tabet +Samuel Cochran +Samuel Karp +Santhosh Manohar +Sargun Dhillon +Saswat Bhattacharya +Scott Brenner +Scott Collier +Sean Christopherson +Sean Rodman +Sebastiaan van Stijn +Sergey Tryuber +Serhat Gülçiçek +Sevki Hasirci +Shaun Kaasten +Sheng Yang +Shijiang Wei +Shishir Mahajan +Shoubhik Bose +Shukui Yang +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silvin Lubecki +Simei He +Simon Ferquel +Simon Heimberg +Sindhu S +Slava Semushin +Solomon Hykes +Song Gao +Spencer Brown +squeegels <1674195+squeegels@users.noreply.github.com> +Srini Brahmaroutu +Stefan S. +Stefan Scherer +Stefan Weil +Stephane Jeandeaux +Stephen Day +Stephen Rust +Steve Durrheimer +Steve Richards +Steven Burgess +Subhajit Ghosh +Sun Jianbo +Sune Keller +Sungwon Han +Sunny Gogoi +Sven Dowideit +Sylvain Baubeau +Sébastien HOUZÉ +T K Sourabh +TAGOMORI Satoshi +taiji-tech +Taylor Jones +Tejaswini Duggaraju +Tengfei Wang +Teppei Fukuda +Thatcher Peskens +Thibault Coupin +Thomas Gazagnaire +Thomas Krzero +Thomas Leonard +Thomas Léveil +Thomas Riccardi +Thomas Swift +Tianon Gravi +Tianyi Wang +Tibor Vass +Tim Dettrick +Tim Hockin +Tim Sampson +Tim Smith +Tim Waugh +Tim Wraight +timfeirg +Timothy Hobbs +Tobias Bradtke +Tobias Gesellchen +Todd Whiteman +Tom Denham +Tom Fotherby +Tom Klingenberg +Tom Milligan +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomáš Hrčka +Tony Abboud +Tõnis Tiigi +Trapier Marshall +Travis Cline +Tristan Carel +Tycho Andersen +Tycho Andersen +uhayate +Ulrich Bareth +Ulysses Souza +Umesh Yadav +Valentin Lorentz +Venkateswara Reddy Bukkasamudram +Veres Lajos +Victor Vieux +Victoria Bialas +Viktor Stanchev +Vimal Raghubir +Vincent Batts +Vincent Bernat +Vincent Demeester +Vincent Woo +Vishnu Kannan +Vivek Goyal +Wang Jie +Wang Lei +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +Wataru Ishida +Wayne Song +Wen Cheng Ma +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +William Henry +Xianglin Gao +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xinbo Weng +Xuecong Liao +Yan Feng +Yanqiang Miao +Yassine Tijani +Yi EungJun +Ying Li +Yong Tang +Yosef Fertel +Yu Peng +Yuan Sun +Yue Zhang +Yunxiang Huang +Zachary Romero +Zander Mackie +zebrilee +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhenghenghuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Álex González +Álvaro Lázaro +Átila Camurça Alves +徐俊杰 diff --git a/vendor/github.com/docker/cli/LICENSE b/vendor/github.com/docker/cli/LICENSE new file mode 100644 index 0000000000..9c8e20ab85 --- /dev/null +++ b/vendor/github.com/docker/cli/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2017 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/cli/NOTICE b/vendor/github.com/docker/cli/NOTICE new file mode 100644 index 0000000000..58b19b6d15 --- /dev/null +++ b/vendor/github.com/docker/cli/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. 
diff --git a/vendor/github.com/docker/cli/cli/config/config.go b/vendor/github.com/docker/cli/cli/config/config.go new file mode 100644 index 0000000000..93275f3d98 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/config.go @@ -0,0 +1,163 @@ +package config + +import ( + "fmt" + "io" + "os" + "path/filepath" + "strings" + "sync" + + "github.com/docker/cli/cli/config/configfile" + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker/pkg/homedir" + "github.com/pkg/errors" +) + +const ( + // ConfigFileName is the name of config file + ConfigFileName = "config.json" + configFileDir = ".docker" + oldConfigfile = ".dockercfg" + contextsDir = "contexts" +) + +var ( + initConfigDir = new(sync.Once) + configDir string + homeDir string +) + +// resetHomeDir is used in testing to reset the "homeDir" package variable to +// force re-lookup of the home directory between tests. +func resetHomeDir() { + homeDir = "" +} + +func getHomeDir() string { + if homeDir == "" { + homeDir = homedir.Get() + } + return homeDir +} + +// resetConfigDir is used in testing to reset the "configDir" package variable +// and its sync.Once to force re-lookup between tests. +func resetConfigDir() { + configDir = "" + initConfigDir = new(sync.Once) +} + +func setConfigDir() { + if configDir != "" { + return + } + configDir = os.Getenv("DOCKER_CONFIG") + if configDir == "" { + configDir = filepath.Join(getHomeDir(), configFileDir) + } +} + +// Dir returns the directory the configuration file is stored in +func Dir() string { + initConfigDir.Do(setConfigDir) + return configDir +} + +// ContextStoreDir returns the directory the docker contexts are stored in +func ContextStoreDir() string { + return filepath.Join(Dir(), contextsDir) +} + +// SetDir sets the directory the configuration file is stored in +func SetDir(dir string) { + configDir = filepath.Clean(dir) +} + +// Path returns the path to a file relative to the config dir +func Path(p ...string) (string, error) { + path := filepath.Join(append([]string{Dir()}, p...)...) + if !strings.HasPrefix(path, Dir()+string(filepath.Separator)) { + return "", errors.Errorf("path %q is outside of root config directory %q", path, Dir()) + } + return path, nil +} + +// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from +// a non-nested reader +func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LegacyLoadFromReader(configData) + return &configFile, err +} + +// LoadFromReader is a convenience function that creates a ConfigFile object from +// a reader +func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { + configFile := configfile.ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + } + err := configFile.LoadFromReader(configData) + return &configFile, err +} + +// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file +var printLegacyFileWarning bool + +// Load reads the configuration files in the given directory, and sets up +// the auth config information and returns values. 
+// FIXME: use the internal golang config parser +func Load(configDir string) (*configfile.ConfigFile, error) { + printLegacyFileWarning = false + + if configDir == "" { + configDir = Dir() + } + + filename := filepath.Join(configDir, ConfigFileName) + configFile := configfile.New(filename) + + // Try happy path first - latest config file + if file, err := os.Open(filename); err == nil { + defer file.Close() + err = configFile.LoadFromReader(file) + if err != nil { + err = errors.Wrap(err, filename) + } + return configFile, err + } else if !os.IsNotExist(err) { + // if file is there but we can't stat it for any reason other + // than it doesn't exist then stop + return configFile, errors.Wrap(err, filename) + } + + // Can't find latest config file so check for the old one + filename = filepath.Join(getHomeDir(), oldConfigfile) + if file, err := os.Open(filename); err == nil { + printLegacyFileWarning = true + defer file.Close() + if err := configFile.LegacyLoadFromReader(file); err != nil { + return configFile, errors.Wrap(err, filename) + } + } + return configFile, nil +} + +// LoadDefaultConfigFile attempts to load the default config file and returns +// an initialized ConfigFile struct if none is found. +func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile { + configFile, err := Load(Dir()) + if err != nil { + fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err) + } + if printLegacyFileWarning { + _, _ = fmt.Fprintln(stderr, "WARNING: Support for the legacy ~/.dockercfg configuration file and file-format is deprecated and will be removed in an upcoming release") + } + if !configFile.ContainsAuth() { + configFile.CredentialsStore = credentials.DetectDefaultStore(configFile.CredentialsStore) + } + return configFile +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file.go b/vendor/github.com/docker/cli/cli/config/configfile/file.go new file mode 100644 index 0000000000..dc9f39eb7e --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file.go @@ -0,0 +1,415 @@ +package configfile + +import ( + "encoding/base64" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + + "github.com/docker/cli/cli/config/credentials" + "github.com/docker/cli/cli/config/types" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // This constant is only used for really old config files when the + // URL wasn't saved as part of the config file and it was just + // assumed to be this value. 
+ defaultIndexServer = "https://index.docker.io/v1/" +) + +// ConfigFile ~/.docker/config.json file info +type ConfigFile struct { + AuthConfigs map[string]types.AuthConfig `json:"auths"` + HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` + PsFormat string `json:"psFormat,omitempty"` + ImagesFormat string `json:"imagesFormat,omitempty"` + NetworksFormat string `json:"networksFormat,omitempty"` + PluginsFormat string `json:"pluginsFormat,omitempty"` + VolumesFormat string `json:"volumesFormat,omitempty"` + StatsFormat string `json:"statsFormat,omitempty"` + DetachKeys string `json:"detachKeys,omitempty"` + CredentialsStore string `json:"credsStore,omitempty"` + CredentialHelpers map[string]string `json:"credHelpers,omitempty"` + Filename string `json:"-"` // Note: for internal use only + ServiceInspectFormat string `json:"serviceInspectFormat,omitempty"` + ServicesFormat string `json:"servicesFormat,omitempty"` + TasksFormat string `json:"tasksFormat,omitempty"` + SecretFormat string `json:"secretFormat,omitempty"` + ConfigFormat string `json:"configFormat,omitempty"` + NodesFormat string `json:"nodesFormat,omitempty"` + PruneFilters []string `json:"pruneFilters,omitempty"` + Proxies map[string]ProxyConfig `json:"proxies,omitempty"` + Experimental string `json:"experimental,omitempty"` + StackOrchestrator string `json:"stackOrchestrator,omitempty"` + Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"` + CurrentContext string `json:"currentContext,omitempty"` + CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"` + Plugins map[string]map[string]string `json:"plugins,omitempty"` + Aliases map[string]string `json:"aliases,omitempty"` +} + +// ProxyConfig contains proxy configuration settings +type ProxyConfig struct { + HTTPProxy string `json:"httpProxy,omitempty"` + HTTPSProxy string `json:"httpsProxy,omitempty"` + NoProxy string `json:"noProxy,omitempty"` + FTPProxy string `json:"ftpProxy,omitempty"` +} + +// KubernetesConfig contains Kubernetes orchestrator settings +type KubernetesConfig struct { + AllNamespaces string `json:"allNamespaces,omitempty"` +} + +// New initializes an empty configuration file for the given filename 'fn' +func New(fn string) *ConfigFile { + return &ConfigFile{ + AuthConfigs: make(map[string]types.AuthConfig), + HTTPHeaders: make(map[string]string), + Filename: fn, + Plugins: make(map[string]map[string]string), + Aliases: make(map[string]string), + } +} + +// LegacyLoadFromReader reads the non-nested configuration data given and sets up the +// auth config information with given directory and populates the receiver object +func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { + b, err := ioutil.ReadAll(configData) + if err != nil { + return err + } + + if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { + arr := strings.Split(string(b), "\n") + if len(arr) < 2 { + return errors.Errorf("The Auth config file is empty") + } + authConfig := types.AuthConfig{} + origAuth := strings.Split(arr[0], " = ") + if len(origAuth) != 2 { + return errors.Errorf("Invalid Auth config file") + } + authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) + if err != nil { + return err + } + authConfig.ServerAddress = defaultIndexServer + configFile.AuthConfigs[defaultIndexServer] = authConfig + } else { + for k, authConfig := range configFile.AuthConfigs { + authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) + if err != nil { + return err + } + authConfig.Auth = "" + 
authConfig.ServerAddress = k + configFile.AuthConfigs[k] = authConfig + } + } + return nil +} + +// LoadFromReader reads the configuration data given and sets up the auth config +// information with given directory and populates the receiver object +func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { + if err := json.NewDecoder(configData).Decode(&configFile); err != nil && !errors.Is(err, io.EOF) { + return err + } + var err error + for addr, ac := range configFile.AuthConfigs { + if ac.Auth != "" { + ac.Username, ac.Password, err = decodeAuth(ac.Auth) + if err != nil { + return err + } + } + ac.Auth = "" + ac.ServerAddress = addr + configFile.AuthConfigs[addr] = ac + } + return checkKubernetesConfiguration(configFile.Kubernetes) +} + +// ContainsAuth returns whether there is authentication configured +// in this file or not. +func (configFile *ConfigFile) ContainsAuth() bool { + return configFile.CredentialsStore != "" || + len(configFile.CredentialHelpers) > 0 || + len(configFile.AuthConfigs) > 0 +} + +// GetAuthConfigs returns the mapping of repo to auth configuration +func (configFile *ConfigFile) GetAuthConfigs() map[string]types.AuthConfig { + return configFile.AuthConfigs +} + +// SaveToWriter encodes and writes out all the authorization information to +// the given writer +func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { + // Encode sensitive data into a new/temp struct + tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) + for k, authConfig := range configFile.AuthConfigs { + authCopy := authConfig + // encode and save the authstring, while blanking out the original fields + authCopy.Auth = encodeAuth(&authCopy) + authCopy.Username = "" + authCopy.Password = "" + authCopy.ServerAddress = "" + tmpAuthConfigs[k] = authCopy + } + + saveAuthConfigs := configFile.AuthConfigs + configFile.AuthConfigs = tmpAuthConfigs + defer func() { configFile.AuthConfigs = saveAuthConfigs }() + + // User-Agent header is automatically set, and should not be stored in the configuration + for v := range configFile.HTTPHeaders { + if strings.EqualFold(v, "User-Agent") { + delete(configFile.HTTPHeaders, v) + } + } + + data, err := json.MarshalIndent(configFile, "", "\t") + if err != nil { + return err + } + _, err = writer.Write(data) + return err +} + +// Save encodes and writes out all the authorization information +func (configFile *ConfigFile) Save() (retErr error) { + if configFile.Filename == "" { + return errors.Errorf("Can't save config with empty filename") + } + + dir := filepath.Dir(configFile.Filename) + if err := os.MkdirAll(dir, 0700); err != nil { + return err + } + temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename)) + if err != nil { + return err + } + defer func() { + temp.Close() + if retErr != nil { + if err := os.Remove(temp.Name()); err != nil { + logrus.WithError(err).WithField("file", temp.Name()).Debug("Error cleaning up temp file") + } + } + }() + + err = configFile.SaveToWriter(temp) + if err != nil { + return err + } + + if err := temp.Close(); err != nil { + return errors.Wrap(err, "error closing temp file") + } + + // Handle situation where the configfile is a symlink + cfgFile := configFile.Filename + if f, err := os.Readlink(cfgFile); err == nil { + cfgFile = f + } + + // Try copying the current config file (if any) ownership and permissions + copyFilePermissions(cfgFile, temp.Name()) + return os.Rename(temp.Name(), cfgFile) +} + +// ParseProxyConfig computes proxy configuration by 
retrieving the config for the provided host and +// then checking this against any environment variables provided to the container +func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*string) map[string]*string { + var cfgKey string + + if _, ok := configFile.Proxies[host]; !ok { + cfgKey = "default" + } else { + cfgKey = host + } + + config := configFile.Proxies[cfgKey] + permitted := map[string]*string{ + "HTTP_PROXY": &config.HTTPProxy, + "HTTPS_PROXY": &config.HTTPSProxy, + "NO_PROXY": &config.NoProxy, + "FTP_PROXY": &config.FTPProxy, + } + m := runOpts + if m == nil { + m = make(map[string]*string) + } + for k := range permitted { + if *permitted[k] == "" { + continue + } + if _, ok := m[k]; !ok { + m[k] = permitted[k] + } + if _, ok := m[strings.ToLower(k)]; !ok { + m[strings.ToLower(k)] = permitted[k] + } + } + return m +} + +// encodeAuth creates a base64 encoded string to containing authorization information +func encodeAuth(authConfig *types.AuthConfig) string { + if authConfig.Username == "" && authConfig.Password == "" { + return "" + } + + authStr := authConfig.Username + ":" + authConfig.Password + msg := []byte(authStr) + encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) + base64.StdEncoding.Encode(encoded, msg) + return string(encoded) +} + +// decodeAuth decodes a base64 encoded string and returns username and password +func decodeAuth(authStr string) (string, string, error) { + if authStr == "" { + return "", "", nil + } + + decLen := base64.StdEncoding.DecodedLen(len(authStr)) + decoded := make([]byte, decLen) + authByte := []byte(authStr) + n, err := base64.StdEncoding.Decode(decoded, authByte) + if err != nil { + return "", "", err + } + if n > decLen { + return "", "", errors.Errorf("Something went wrong decoding auth config") + } + arr := strings.SplitN(string(decoded), ":", 2) + if len(arr) != 2 { + return "", "", errors.Errorf("Invalid auth configuration file") + } + password := strings.Trim(arr[1], "\x00") + return arr[0], password, nil +} + +// GetCredentialsStore returns a new credentials store from the settings in the +// configuration file +func (configFile *ConfigFile) GetCredentialsStore(registryHostname string) credentials.Store { + if helper := getConfiguredCredentialStore(configFile, registryHostname); helper != "" { + return newNativeStore(configFile, helper) + } + return credentials.NewFileStore(configFile) +} + +// var for unit testing. +var newNativeStore = func(configFile *ConfigFile, helperSuffix string) credentials.Store { + return credentials.NewNativeStore(configFile, helperSuffix) +} + +// GetAuthConfig for a repository from the credential store +func (configFile *ConfigFile) GetAuthConfig(registryHostname string) (types.AuthConfig, error) { + return configFile.GetCredentialsStore(registryHostname).Get(registryHostname) +} + +// getConfiguredCredentialStore returns the credential helper configured for the +// given registry, the default credsStore, or the empty string if neither are +// configured. +func getConfiguredCredentialStore(c *ConfigFile, registryHostname string) string { + if c.CredentialHelpers != nil && registryHostname != "" { + if helper, exists := c.CredentialHelpers[registryHostname]; exists { + return helper + } + } + return c.CredentialsStore +} + +// GetAllCredentials returns all of the credentials stored in all of the +// configured credential stores. 
+func (configFile *ConfigFile) GetAllCredentials() (map[string]types.AuthConfig, error) { + auths := make(map[string]types.AuthConfig) + addAll := func(from map[string]types.AuthConfig) { + for reg, ac := range from { + auths[reg] = ac + } + } + + defaultStore := configFile.GetCredentialsStore("") + newAuths, err := defaultStore.GetAll() + if err != nil { + return nil, err + } + addAll(newAuths) + + // Auth configs from a registry-specific helper should override those from the default store. + for registryHostname := range configFile.CredentialHelpers { + newAuth, err := configFile.GetAuthConfig(registryHostname) + if err != nil { + return nil, err + } + auths[registryHostname] = newAuth + } + return auths, nil +} + +// GetFilename returns the file name that this config file is based on. +func (configFile *ConfigFile) GetFilename() string { + return configFile.Filename +} + +// PluginConfig retrieves the requested option for the given plugin. +func (configFile *ConfigFile) PluginConfig(pluginname, option string) (string, bool) { + if configFile.Plugins == nil { + return "", false + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + return "", false + } + value, ok := pluginConfig[option] + return value, ok +} + +// SetPluginConfig sets the option to the given value for the given +// plugin. Passing a value of "" will remove the option. If removing +// the final config item for a given plugin then also cleans up the +// overall plugin entry. +func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string) { + if configFile.Plugins == nil { + configFile.Plugins = make(map[string]map[string]string) + } + pluginConfig, ok := configFile.Plugins[pluginname] + if !ok { + pluginConfig = make(map[string]string) + configFile.Plugins[pluginname] = pluginConfig + } + if value != "" { + pluginConfig[option] = value + } else { + delete(pluginConfig, option) + } + if len(pluginConfig) == 0 { + delete(configFile.Plugins, pluginname) + } +} + +func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error { + if kubeConfig == nil { + return nil + } + switch kubeConfig.AllNamespaces { + case "": + case "enabled": + case "disabled": + default: + return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces) + } + return nil +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go new file mode 100644 index 0000000000..3ca65c6140 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_unix.go @@ -0,0 +1,35 @@ +// +build !windows + +package configfile + +import ( + "os" + "syscall" +) + +// copyFilePermissions copies file ownership and permissions from "src" to "dst", +// ignoring any error during the process. 
+func copyFilePermissions(src, dst string) { + var ( + mode os.FileMode = 0600 + uid, gid int + ) + + fi, err := os.Stat(src) + if err != nil { + return + } + if fi.Mode().IsRegular() { + mode = fi.Mode() + } + if err := os.Chmod(dst, mode); err != nil { + return + } + + uid = int(fi.Sys().(*syscall.Stat_t).Uid) + gid = int(fi.Sys().(*syscall.Stat_t).Gid) + + if uid > 0 && gid > 0 { + _ = os.Chown(dst, uid, gid) + } +} diff --git a/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go new file mode 100644 index 0000000000..42fffc39ad --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/configfile/file_windows.go @@ -0,0 +1,5 @@ +package configfile + +func copyFilePermissions(src, dst string) { + // TODO implement for Windows +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/credentials.go b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go new file mode 100644 index 0000000000..28d58ec48d --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/credentials.go @@ -0,0 +1,17 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" +) + +// Store is the interface that any credentials store must implement. +type Store interface { + // Erase removes credentials from the store for a given server. + Erase(serverAddress string) error + // Get retrieves credentials from the store for a given server. + Get(serverAddress string) (types.AuthConfig, error) + // GetAll retrieves all the credentials from the store. + GetAll() (map[string]types.AuthConfig, error) + // Store saves credentials in the store. + Store(authConfig types.AuthConfig) error +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go new file mode 100644 index 0000000000..402235bff0 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store.go @@ -0,0 +1,21 @@ +package credentials + +import ( + exec "golang.org/x/sys/execabs" +) + +// DetectDefaultStore return the default credentials store for the platform if +// the store executable is available. 
+func DetectDefaultStore(store string) string { + platformDefault := defaultCredentialsStore() + + // user defined or no default for platform + if store != "" || platformDefault == "" { + return store + } + + if _, err := exec.LookPath(remoteCredentialsPrefix + platformDefault); err == nil { + return platformDefault + } + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go new file mode 100644 index 0000000000..5d42dec622 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_darwin.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "osxkeychain" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go new file mode 100644 index 0000000000..a9012c6d4a --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_linux.go @@ -0,0 +1,13 @@ +package credentials + +import ( + "os/exec" +) + +func defaultCredentialsStore() string { + if _, err := exec.LookPath("pass"); err == nil { + return "pass" + } + + return "secretservice" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go new file mode 100644 index 0000000000..3028168ac2 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_unsupported.go @@ -0,0 +1,7 @@ +// +build !windows,!darwin,!linux + +package credentials + +func defaultCredentialsStore() string { + return "" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go new file mode 100644 index 0000000000..bb799ca61b --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/default_store_windows.go @@ -0,0 +1,5 @@ +package credentials + +func defaultCredentialsStore() string { + return "wincred" +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/file_store.go b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go new file mode 100644 index 0000000000..e509820b73 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/file_store.go @@ -0,0 +1,81 @@ +package credentials + +import ( + "strings" + + "github.com/docker/cli/cli/config/types" +) + +type store interface { + Save() error + GetAuthConfigs() map[string]types.AuthConfig + GetFilename() string +} + +// fileStore implements a credentials store using +// the docker configuration file to keep the credentials in plain text. +type fileStore struct { + file store +} + +// NewFileStore creates a new file credentials store. +func NewFileStore(file store) Store { + return &fileStore{file: file} +} + +// Erase removes the given credentials from the file store. +func (c *fileStore) Erase(serverAddress string) error { + delete(c.file.GetAuthConfigs(), serverAddress) + return c.file.Save() +} + +// Get retrieves credentials for a specific server from the file store. 
+func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { + authConfig, ok := c.file.GetAuthConfigs()[serverAddress] + if !ok { + // Maybe they have a legacy config file, we will iterate the keys converting + // them to the new format and testing + for r, ac := range c.file.GetAuthConfigs() { + if serverAddress == ConvertToHostname(r) { + return ac, nil + } + } + + authConfig = types.AuthConfig{} + } + return authConfig, nil +} + +func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { + return c.file.GetAuthConfigs(), nil +} + +// Store saves the given credentials in the file store. +func (c *fileStore) Store(authConfig types.AuthConfig) error { + c.file.GetAuthConfigs()[authConfig.ServerAddress] = authConfig + return c.file.Save() +} + +func (c *fileStore) GetFilename() string { + return c.file.GetFilename() +} + +func (c *fileStore) IsFileStore() bool { + return true +} + +// ConvertToHostname converts a registry url which has http|https prepended +// to just an hostname. +// Copied from github.com/docker/docker/registry.ConvertToHostname to reduce dependencies. +func ConvertToHostname(url string) string { + stripped := url + if strings.HasPrefix(url, "http://") { + stripped = strings.TrimPrefix(url, "http://") + } else if strings.HasPrefix(url, "https://") { + stripped = strings.TrimPrefix(url, "https://") + } + + nameParts := strings.SplitN(stripped, "/", 2) + + return nameParts[0] +} diff --git a/vendor/github.com/docker/cli/cli/config/credentials/native_store.go b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go new file mode 100644 index 0000000000..afe542cc3c --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/credentials/native_store.go @@ -0,0 +1,143 @@ +package credentials + +import ( + "github.com/docker/cli/cli/config/types" + "github.com/docker/docker-credential-helpers/client" + "github.com/docker/docker-credential-helpers/credentials" +) + +const ( + remoteCredentialsPrefix = "docker-credential-" + tokenUsername = "" +) + +// nativeStore implements a credentials store +// using native keychain to keep credentials secure. +// It piggybacks into a file store to keep users' emails. +type nativeStore struct { + programFunc client.ProgramFunc + fileStore Store +} + +// NewNativeStore creates a new native store that +// uses a remote helper program to manage credentials. +func NewNativeStore(file store, helperSuffix string) Store { + name := remoteCredentialsPrefix + helperSuffix + return &nativeStore{ + programFunc: client.NewShellProgramFunc(name), + fileStore: NewFileStore(file), + } +} + +// Erase removes the given credentials from the native store. +func (c *nativeStore) Erase(serverAddress string) error { + if err := client.Erase(c.programFunc, serverAddress); err != nil { + return err + } + + // Fallback to plain text store to remove email + return c.fileStore.Erase(serverAddress) +} + +// Get retrieves credentials for a specific server from the native store. +func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { + // load user email if it exist or an empty auth config. + auth, _ := c.fileStore.Get(serverAddress) + + creds, err := c.getCredentialsFromStore(serverAddress) + if err != nil { + return auth, err + } + auth.Username = creds.Username + auth.IdentityToken = creds.IdentityToken + auth.Password = creds.Password + + return auth, nil +} + +// GetAll retrieves all the credentials from the native store. 
+func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { + auths, err := c.listCredentialsInStore() + if err != nil { + return nil, err + } + + // Emails are only stored in the file store. + // This call can be safely eliminated when emails are removed. + fileConfigs, _ := c.fileStore.GetAll() + + authConfigs := make(map[string]types.AuthConfig) + for registry := range auths { + creds, err := c.getCredentialsFromStore(registry) + if err != nil { + return nil, err + } + ac := fileConfigs[registry] // might contain Email + ac.Username = creds.Username + ac.Password = creds.Password + ac.IdentityToken = creds.IdentityToken + authConfigs[registry] = ac + } + + return authConfigs, nil +} + +// Store saves the given credentials in the file store. +func (c *nativeStore) Store(authConfig types.AuthConfig) error { + if err := c.storeCredentialsInStore(authConfig); err != nil { + return err + } + authConfig.Username = "" + authConfig.Password = "" + authConfig.IdentityToken = "" + + // Fallback to old credential in plain text to save only the email + return c.fileStore.Store(authConfig) +} + +// storeCredentialsInStore executes the command to store the credentials in the native store. +func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { + creds := &credentials.Credentials{ + ServerURL: config.ServerAddress, + Username: config.Username, + Secret: config.Password, + } + + if config.IdentityToken != "" { + creds.Username = tokenUsername + creds.Secret = config.IdentityToken + } + + return client.Store(c.programFunc, creds) +} + +// getCredentialsFromStore executes the command to get the credentials from the native store. +func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { + var ret types.AuthConfig + + creds, err := client.Get(c.programFunc, serverAddress) + if err != nil { + if credentials.IsErrCredentialsNotFound(err) { + // do not return an error if the credentials are not + // in the keychain. Let docker ask for new credentials. + return ret, nil + } + return ret, err + } + + if creds.Username == tokenUsername { + ret.IdentityToken = creds.Secret + } else { + ret.Password = creds.Secret + ret.Username = creds.Username + } + + ret.ServerAddress = serverAddress + return ret, nil +} + +// listCredentialsInStore returns a listing of stored credentials as a map of +// URL -> username. +func (c *nativeStore) listCredentialsInStore() (map[string]string, error) { + return client.List(c.programFunc) +} diff --git a/vendor/github.com/docker/cli/cli/config/types/authconfig.go b/vendor/github.com/docker/cli/cli/config/types/authconfig.go new file mode 100644 index 0000000000..056af6b842 --- /dev/null +++ b/vendor/github.com/docker/cli/cli/config/types/authconfig.go @@ -0,0 +1,22 @@ +package types + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/distribution/.gitignore b/vendor/github.com/docker/distribution/.gitignore new file mode 100644 index 0000000000..4cf7888e92 --- /dev/null +++ b/vendor/github.com/docker/distribution/.gitignore @@ -0,0 +1,38 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof + +# never checkin from the bin file (for now) +bin/* + +# Test key files +*.pem + +# Cover profiles +*.out + +# Editor/IDE specific files. +*.sublime-project +*.sublime-workspace +.idea/* diff --git a/vendor/github.com/docker/distribution/.gometalinter.json b/vendor/github.com/docker/distribution/.gometalinter.json new file mode 100644 index 0000000000..9df5b14bcb --- /dev/null +++ b/vendor/github.com/docker/distribution/.gometalinter.json @@ -0,0 +1,16 @@ +{ + "Vendor": true, + "Deadline": "2m", + "Sort": ["linter", "severity", "path", "line"], + "EnableGC": true, + "Enable": [ + "structcheck", + "staticcheck", + "unconvert", + + "gofmt", + "goimports", + "golint", + "vet" + ] +} diff --git a/vendor/github.com/docker/distribution/.mailmap b/vendor/github.com/docker/distribution/.mailmap new file mode 100644 index 0000000000..0f48321d49 --- /dev/null +++ b/vendor/github.com/docker/distribution/.mailmap @@ -0,0 +1,32 @@ +Stephen J Day Stephen Day +Stephen J Day Stephen Day +Olivier Gambier Olivier Gambier +Brian Bland Brian Bland +Brian Bland Brian Bland +Josh Hawn Josh Hawn +Richard Scothern Richard +Richard Scothern Richard Scothern +Andrew Meredith Andrew Meredith +harche harche +Jessie Frazelle +Sharif Nassar Sharif Nassar +Sven Dowideit Sven Dowideit +Vincent Giersch Vincent Giersch +davidli davidli +Omer Cohen Omer Cohen +Eric Yang Eric Yang +Nikita Tarasov Nikita +Yu Wang yuwaMSFT2 +Yu Wang Yu Wang (UC) +Olivier Gambier dmp +Olivier Gambier Olivier +Olivier Gambier Olivier +Elsan Li 李楠 elsanli(李楠) +Rui Cao ruicao +Gwendolynne Barr gbarr01 +Haibing Zhou 周海兵 zhouhaibing089 +Feng Honglin tifayuki +Helen Xie Helen-xie +Mike Brown Mike Brown +Manish Tomar Manish Tomar +Sakeven Jiang sakeven diff --git a/vendor/github.com/docker/distribution/.travis.yml b/vendor/github.com/docker/distribution/.travis.yml new file mode 100644 index 0000000000..44ced60451 --- /dev/null +++ b/vendor/github.com/docker/distribution/.travis.yml @@ -0,0 +1,51 @@ +dist: trusty +sudo: required +# setup travis so that we can run containers for integration tests +services: + - docker + +language: go + +go: + - "1.11.x" + +go_import_path: github.com/docker/distribution + +addons: + apt: + packages: + - python-minimal + + +env: + - TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1 + +before_install: + - uname -r + - sudo apt-get -q update + +install: + - go get -u github.com/vbatts/git-validation + # TODO: Add enforcement of license + # - go get -u github.com/kunalkushwaha/ltag + - cd $TRAVIS_BUILD_DIR + +script: + - export GOOS=$TRAVIS_GOOS + - export CGO_ENABLED=$TRAVIS_CGO_ENABLED + - DCO_VERBOSITY=-q script/validate/dco + - GOOS=linux 
script/setup/install-dev-tools + - script/validate/vendor + - go build -i . + - make check + - make build + - make binaries + # Currently takes too long + #- if [ "$GOOS" = "linux" ]; then make test-race ; fi + - if [ "$GOOS" = "linux" ]; then make coverage ; fi + +after_success: + - bash <(curl -s https://codecov.io/bash) -F linux + +before_deploy: + # Run tests with storage driver configurations diff --git a/vendor/github.com/docker/distribution/BUILDING.md b/vendor/github.com/docker/distribution/BUILDING.md new file mode 100644 index 0000000000..2981d016b0 --- /dev/null +++ b/vendor/github.com/docker/distribution/BUILDING.md @@ -0,0 +1,117 @@ + +# Building the registry source + +## Use-case + +This is useful if you intend to actively work on the registry. + +### Alternatives + +Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). + +People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. + +OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md). + +### Gotchas + +You are expected to know your way around with go & git. + +If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. + +## Build the development environment + +The first prerequisite of properly building distribution targets is to have a Go +development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) +for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the +environment. + +If a Go development environment is setup, one can use `go get` to install the +`registry` command from the current latest: + + go get github.com/docker/distribution/cmd/registry + +The above will install the source repository into the `GOPATH`. + +Now create the directory for the registry data (this might require you to set permissions properly) + + mkdir -p /var/lib/registry + +... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. + +The `registry` +binary can then be run with the following: + + $ $GOPATH/bin/registry --version + $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown + +> __NOTE:__ While you do not need to use `go get` to checkout the distribution +> project, for these build instructions to work, the project must be checked +> out in the correct location in the `GOPATH`. This should almost always be +> `$GOPATH/src/github.com/docker/distribution`. + +The registry can be run with the default config using the following +incantation: + + $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml + INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown + INFO[0000] debug server listening localhost:5001 + +If it is working, one should see the above log messages. 
+ +### Repeatable Builds + +For the full development experience, one should `cd` into +`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` +commands, such as `go test`, should work per package (please see +[Developing](#developing) if they don't work). + +A `Makefile` has been provided as a convenience to support repeatable builds. +Please install the following into `GOPATH` for it to work: + + go get github.com/golang/lint/golint + +Once these commands are available in the `GOPATH`, run `make` to get a full +build: + + $ make + + clean + + fmt + + vet + + lint + + build + github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar + github.com/sirupsen/logrus + github.com/docker/libtrust + ... + github.com/yvasiyarov/gorelic + github.com/docker/distribution/registry/handlers + github.com/docker/distribution/cmd/registry + + test + ... + ok github.com/docker/distribution/digest 7.875s + ok github.com/docker/distribution/manifest 0.028s + ok github.com/docker/distribution/notifications 17.322s + ? github.com/docker/distribution/registry [no test files] + ok github.com/docker/distribution/registry/api/v2 0.101s + ? github.com/docker/distribution/registry/auth [no test files] + ok github.com/docker/distribution/registry/auth/silly 0.011s + ... + + /Users/sday/go/src/github.com/docker/distribution/bin/registry + + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template + + binaries + +The above provides a repeatable build using the contents of the vendor +directory. This includes formatting, vetting, linting, building, +testing and generating tagged binaries. We can verify this worked by running +the registry binary generated in the "./bin" directory: + + $ ./bin/registry --version + ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m + +### Optional build tags + +Optional [build tags](http://golang.org/pkg/go/build/) can be provided using +the environment variable `DOCKER_BUILDTAGS`. diff --git a/vendor/github.com/docker/distribution/CONTRIBUTING.md b/vendor/github.com/docker/distribution/CONTRIBUTING.md new file mode 100644 index 0000000000..4c067d9e7e --- /dev/null +++ b/vendor/github.com/docker/distribution/CONTRIBUTING.md @@ -0,0 +1,148 @@ +# Contributing to the registry + +## Before reporting an issue... + +### If your problem is with... + + - automated builds + - your account on the [Docker Hub](https://hub.docker.com/) + - any other [Docker Hub](https://hub.docker.com/) issue + +Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) + +### If you... + + - need help setting up your registry + - can't figure out something + - are not sure what's going on or what your problem is + +Then please do not open an issue here yet - you should first try one of the following support forums: + + - irc: #docker-distribution on freenode + - mailing-list: or https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution + +### Reporting security issues + +The Docker maintainers take security seriously. If you discover a security +issue, please bring it to their attention right away! + +Please **DO NOT** file a public issue, instead send your report privately to +[security@docker.com](mailto:security@docker.com). 
+ +## Reporting an issue properly + +By following these simple rules you will get better and faster feedback on your issue. + + - search the bugtracker for an already reported issue + +### If you found an issue that describes your problem: + + - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments + - please refrain from adding "same thing here" or "+1" comments + - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button + - comment if you have some new, technical and relevant information to add to the case + - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. + +### If you have not found an existing issue that describes your problem: + + 1. create a new issue, with a succinct title that describes your issue: + - bad title: "It doesn't work with my docker" + - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" + 2. copy the output of: + - `docker version` + - `docker info` + - `docker exec registry --version` + 3. copy the command line you used to launch your Registry + 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) + 5. reproduce your problem and get your docker daemon logs showing the error + 6. if relevant, copy your registry logs that show the error + 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) + 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry + +## Contributing a patch for a known bug, or a small correction + +You should follow the basic GitHub workflow: + + 1. fork + 2. commit a change + 3. make sure the tests pass + 4. PR + +Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: + + - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` + - sign your commits using `-s`: `git commit -s -m "My commit"` + +Some simple rules to ensure quick merge: + + - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) + - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once + - if you need to amend your PR following comments, please squash instead of adding more commits + +## Contributing new features + +You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. + +If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. +If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. + +Then you should submit your implementation, clearly linking to the issue (and possible proposal). + +Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. 
+ +It's mandatory to: + + - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) + - address maintainers' comments and modify your submission accordingly + - write tests for any new code + +Complying to these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. + +Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493) + +## Coding Style + +Unless explicitly stated, we follow all coding guidelines from the Go +community. While some of these standards may seem arbitrary, they somehow seem +to result in a solid, consistent codebase. + +It is possible that the code base does not currently comply with these +guidelines. We are not looking for a massive PR that fixes this, since that +goes against the spirit of the guidelines. All new contributions should make a +best effort to clean up and make the code base better than they left it. +Obviously, apply your best judgement. Remember, the goal here is to make the +code base easier for humans to navigate and understand. Always keep that in +mind when nudging others to comply. + +The rules: + +1. All code should be formatted with `gofmt -s`. +2. All code should pass the default levels of + [`golint`](https://github.com/golang/lint). +3. All code should follow the guidelines covered in [Effective + Go](http://golang.org/doc/effective_go.html) and [Go Code Review + Comments](https://github.com/golang/go/wiki/CodeReviewComments). +4. Comment the code. Tell us the why, the history and the context. +5. Document _all_ declarations and methods, even private ones. Declare + expectations, caveats and anything else that may be important. If a type + gets exported, having the comments already there will ensure it's ready. +6. Variable name length should be proportional to its context and no longer. + `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. + In practice, short methods will have short variable names and globals will + have longer names. +7. No underscores in package names. If you need a compound name, step back, + and re-examine why you need a compound name. If you still think you need a + compound name, lose the underscore. +8. No utils or helpers packages. If a function is not general enough to + warrant its own package, it has not been written generally enough to be a + part of a util package. Just leave it unexported and well-documented. +9. All tests should run with `go test` and outside tooling should not be + required. No, we don't need another unit testing framework. Assertion + packages are acceptable if they provide _real_ incremental value. +10. Even though we call these "rules" above, they are actually just + guidelines. Since you've read all the rules, you now know that. + +If you are having trouble getting into the mood of idiomatic Go, we recommend +reading through [Effective Go](http://golang.org/doc/effective_go.html). The +[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the +kool-aid is a lot easier than going thirsty. 
diff --git a/vendor/github.com/docker/distribution/Dockerfile b/vendor/github.com/docker/distribution/Dockerfile new file mode 100644 index 0000000000..9537817ca3 --- /dev/null +++ b/vendor/github.com/docker/distribution/Dockerfile @@ -0,0 +1,23 @@ +FROM golang:1.11-alpine AS build + +ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution +ENV BUILDTAGS include_oss include_gcs + +ARG GOOS=linux +ARG GOARCH=amd64 +ARG GOARM=6 + +RUN set -ex \ + && apk add --no-cache make git file + +WORKDIR $DISTRIBUTION_DIR +COPY . $DISTRIBUTION_DIR +RUN CGO_ENABLED=0 make PREFIX=/go clean binaries && file ./bin/registry | grep "statically linked" + +FROM alpine +COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml +COPY --from=build /go/src/github.com/docker/distribution/bin/registry /bin/registry +VOLUME ["/var/lib/registry"] +EXPOSE 5000 +ENTRYPOINT ["registry"] +CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/github.com/docker/distribution/LICENSE b/vendor/github.com/docker/distribution/LICENSE new file mode 100644 index 0000000000..e06d208186 --- /dev/null +++ b/vendor/github.com/docker/distribution/LICENSE @@ -0,0 +1,202 @@ +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/docker/distribution/MAINTAINERS b/vendor/github.com/docker/distribution/MAINTAINERS new file mode 100644 index 0000000000..3183620c57 --- /dev/null +++ b/vendor/github.com/docker/distribution/MAINTAINERS @@ -0,0 +1,243 @@ +# Distribution maintainers file +# +# This file describes who runs the docker/distribution project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# + +[Rules] + + [Rules.maintainers] + + title = "What is a maintainer?" + + text = """ +There are different types of maintainers, with different responsibilities, but +all maintainers have 3 things in common: + +1) They share responsibility in the project's success. +2) They have made a long-term, recurring time investment to improve the project. 
+3) They spend that time doing whatever needs to be done, not necessarily what +is the most interesting or fun. + +Maintainers are often under-appreciated, because their work is harder to appreciate. +It's easy to appreciate a really cool and technically advanced feature. It's harder +to appreciate the absence of bugs, the slow but steady improvement in stability, +or the reliability of a release process. But those things distinguish a good +project from a great one. +""" + + [Rules.reviewer] + + title = "What is a reviewer?" + + text = """ +A reviewer is a core role within the project. +They share in reviewing issues and pull requests and their LGTM count towards the +required LGTM count to merge a code change into the project. + +Reviewers are part of the organization but do not have write access. +Becoming a reviewer is a core aspect in the journey to becoming a maintainer. +""" + + [Rules.adding-maintainers] + + title = "How are maintainers added?" + + text = """ +Maintainers are first and foremost contributors that have shown they are +committed to the long term success of a project. Contributors wanting to become +maintainers are expected to be deeply involved in contributing code, pull +request review, and triage of issues in the project for more than three months. + +Just contributing does not make you a maintainer, it is about building trust +with the current maintainers of the project and being a person that they can +depend on and trust to make decisions in the best interest of the project. + +Periodically, the existing maintainers curate a list of contributors that have +shown regular activity on the project over the prior months. From this list, +maintainer candidates are selected and proposed on the maintainers mailing list. + +After a candidate has been announced on the maintainers mailing list, the +existing maintainers are given five business days to discuss the candidate, +raise objections and cast their vote. Candidates must be approved by at least 66% of the current maintainers by adding their vote on the mailing +list. Only maintainers of the repository that the candidate is proposed for are +allowed to vote. + +If a candidate is approved, a maintainer will contact the candidate to invite +the candidate to open a pull request that adds the contributor to the +MAINTAINERS file. The candidate becomes a maintainer once the pull request is +merged. +""" + + [Rules.stepping-down-policy] + + title = "Stepping down policy" + + text = """ +Life priorities, interests, and passions can change. If you're a maintainer but +feel you must remove yourself from the list, inform other maintainers that you +intend to step down, and if possible, help find someone to pick up your work. +At the very least, ensure your work can be continued where you left off. + +After you've informed other maintainers, create a pull request to remove +yourself from the MAINTAINERS file. +""" + + [Rules.inactive-maintainers] + + title = "Removal of inactive maintainers" + + text = """ +Similar to the procedure for adding new maintainers, existing maintainers can +be removed from the list if they do not show significant activity on the +project. Periodically, the maintainers review the list of maintainers and their +activity over the last three months. + +If a maintainer has shown insufficient activity over this period, a neutral +person will contact the maintainer to ask if they want to continue being +a maintainer. 
If the maintainer decides to step down as a maintainer, they
+open a pull request to be removed from the MAINTAINERS file.
+
+If the maintainer wants to remain a maintainer, but is unable to perform the
+required duties, they can be removed with a vote of at least 66% of
+the current maintainers. An e-mail is sent to the
+mailing list, inviting maintainers of the project to vote. The voting period is
+five business days. Issues related to a maintainer's performance should be
+discussed with them among the other maintainers so that they are not surprised
+by a pull request removing them.
+"""
+
+	[Rules.decisions]
+
+	title = "How are decisions made?"
+
+	text = """
+Short answer: EVERYTHING IS A PULL REQUEST.
+
+distribution is an open-source project with an open design philosophy. This means
+that the repository is the source of truth for EVERY aspect of the project,
+including its philosophy, design, road map, and APIs. *If it's part of the
+project, it's in the repo. If it's in the repo, it's part of the project.*
+
+As a result, all decisions can be expressed as changes to the repository. An
+implementation change is a change to the source code. An API change is a change
+to the API specification. A philosophy change is a change to the philosophy
+manifesto, and so on.
+
+All decisions affecting distribution, big and small, follow the same 3 steps:
+
+* Step 1: Open a pull request. Anyone can do this.
+
+* Step 2: Discuss the pull request. Anyone can do this.
+
+* Step 3: Merge or refuse the pull request. Who does this depends on the nature
+of the pull request and which areas of the project it affects.
+"""
+
+	[Rules.DCO]
+
+	title = "Helping contributors with the DCO"
+
+	text = """
+The [DCO or `Sign your work`](
+https://github.com/moby/moby/blob/master/CONTRIBUTING.md#sign-your-work)
+requirement is not intended as a roadblock or speed bump.
+
+Some distribution contributors are not as familiar with `git`, or have used a
+web-based editor, and thus asking them to `git commit --amend -s` is not the
+best way forward.
+
+In this case, maintainers can update the commits based on clause (c) of the DCO.
+The most trivial way for a contributor to allow the maintainer to do this is to
+add a DCO signature in a pull request's comment, or a maintainer can simply
+note that the change is sufficiently trivial that it does not substantially
+change the existing contribution - e.g., a spelling change.
+
+When you add someone's DCO, please also add your own to keep a log.
+"""
+
+	[Rules."no direct push"]
+
+	title = "I'm a maintainer. Should I make pull requests too?"
+
+	text = """
+Yes. Nobody should ever push to master directly. All changes should be
+made through a pull request.
+"""
+
+	[Rules.tsc]
+
+	title = "Conflict Resolution and technical disputes"
+
+	text = """
+distribution defers to the [Technical Steering Committee](https://github.com/moby/tsc) for escalations and resolution on disputes for technical matters.
+"""
+
+	[Rules.meta]
+
+	title = "How is this process changed?"
+
+	text = "Just like everything else: by making a pull request :)"
+
+# Current project organization
+[Org]
+
+	[Org.Maintainers]
+	people = [
+		"dmcgowan",
+		"dmp42",
+		"stevvooe",
+	]
+	[Org.Reviewers]
+	people = [
+		"manishtomar",
+		"caervs",
+		"davidswu",
+		"RobbKistler"
+	]
+
+[people]
+
+# A reference list of all people associated with the project.
+# All other sections should refer to people by their canonical key
+# in the people section.
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.caervs] + Name = "Ryan Abrams" + Email = "rdabrams@gmail.com" + GitHub = "caervs" + + [people.davidswu] + Name = "David Wu" + Email = "dwu7401@gmail.com" + GitHub = "davidswu" + + [people.dmcgowan] + Name = "Derek McGowan" + Email = "derek@mcgstyle.net" + GitHub = "dmcgowan" + + [people.dmp42] + Name = "Olivier Gambier" + Email = "olivier@docker.com" + GitHub = "dmp42" + + [people.manishtomar] + Name = "Manish Tomar" + Email = "manish.tomar@docker.com" + GitHub = "manishtomar" + + [people.RobbKistler] + Name = "Robb Kistler" + Email = "robb.kistler@docker.com" + GitHub = "RobbKistler" + + [people.stevvooe] + Name = "Stephen Day" + Email = "stephen.day@docker.com" + GitHub = "stevvooe" diff --git a/vendor/github.com/docker/distribution/Makefile b/vendor/github.com/docker/distribution/Makefile new file mode 100644 index 0000000000..4635c6eca8 --- /dev/null +++ b/vendor/github.com/docker/distribution/Makefile @@ -0,0 +1,102 @@ +# Root directory of the project (absolute path). +ROOTDIR=$(dir $(abspath $(lastword $(MAKEFILE_LIST)))) + +# Used to populate version variable in main package. +VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) +REVISION=$(shell git rev-parse HEAD)$(shell if ! git diff --no-ext-diff --quiet --exit-code; then echo .m; fi) + + +PKG=github.com/docker/distribution + +# Project packages. +PACKAGES=$(shell go list -tags "${BUILDTAGS}" ./... | grep -v /vendor/) +INTEGRATION_PACKAGE=${PKG} +COVERAGE_PACKAGES=$(filter-out ${PKG}/registry/storage/driver/%,${PACKAGES}) + + +# Project binaries. +COMMANDS=registry digest registry-api-descriptor-template + +# Allow turning off function inlining and variable registerization +ifeq (${DISABLE_OPTIMIZATION},true) + GO_GCFLAGS=-gcflags "-N -l" + VERSION:="$(VERSION)-noopt" +endif + +WHALE = "+" + +# Go files +# +TESTFLAGS_RACE= +GOFILES=$(shell find . -type f -name '*.go') +GO_TAGS=$(if $(BUILDTAGS),-tags "$(BUILDTAGS)",) +GO_LDFLAGS=-ldflags '-s -w -X $(PKG)/version.Version=$(VERSION) -X $(PKG)/version.Revision=$(REVISION) -X $(PKG)/version.Package=$(PKG) $(EXTRA_LDFLAGS)' + +BINARIES=$(addprefix bin/,$(COMMANDS)) + +# Flags passed to `go test` +TESTFLAGS ?= -v $(TESTFLAGS_RACE) +TESTFLAGS_PARALLEL ?= 8 + +.PHONY: all build binaries check clean test test-race test-full integration coverage +.DEFAULT: all + +all: binaries + +# This only needs to be generated by hand when cutting full releases. +version/version.go: + @echo "$(WHALE) $@" + ./version/version.sh > $@ + +check: ## run all linters (TODO: enable "unused", "varcheck", "ineffassign", "unconvert", "staticheck", "goimports", "structcheck") + @echo "$(WHALE) $@" + gometalinter --config .gometalinter.json ./... 
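+
+# Example invocations (illustrative only; the target names below are defined
+# in this file, but the test pattern is a hypothetical placeholder):
+#
+#   make binaries                          # build each command into bin/
+#   make test TESTFLAGS='-v -run TestFoo'  # TESTFLAGS may be overridden
+#   make coverage                          # aggregate profiles into coverage.txt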
+ +test: ## run tests, except integration test with test.short + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +test-race: ## run tests, except integration test with test.short and race + @echo "$(WHALE) $@" + @go test ${GO_TAGS} -race -test.short ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +test-full: ## run tests, except integration tests + @echo "$(WHALE) $@" + @go test ${GO_TAGS} ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${PACKAGES}) + +integration: ## run integration tests + @echo "$(WHALE) $@" + @go test ${TESTFLAGS} -parallel ${TESTFLAGS_PARALLEL} ${INTEGRATION_PACKAGE} + +coverage: ## generate coverprofiles from the unit tests + @echo "$(WHALE) $@" + @rm -f coverage.txt + @go test ${GO_TAGS} -i ${TESTFLAGS} $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}) 2> /dev/null + @( for pkg in $(filter-out ${INTEGRATION_PACKAGE},${COVERAGE_PACKAGES}); do \ + go test ${GO_TAGS} ${TESTFLAGS} \ + -cover \ + -coverprofile=profile.out \ + -covermode=atomic $$pkg || exit; \ + if [ -f profile.out ]; then \ + cat profile.out >> coverage.txt; \ + rm profile.out; \ + fi; \ + done ) + +FORCE: + +# Build a binary from a cmd. +bin/%: cmd/% FORCE + @echo "$(WHALE) $@${BINARY_SUFFIX}" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} -o $@${BINARY_SUFFIX} ${GO_LDFLAGS} ${GO_TAGS} ./$< + +binaries: $(BINARIES) ## build binaries + @echo "$(WHALE) $@" + +build: + @echo "$(WHALE) $@" + @go build ${GO_GCFLAGS} ${GO_BUILD_FLAGS} ${GO_LDFLAGS} ${GO_TAGS} $(PACKAGES) + +clean: ## clean up binaries + @echo "$(WHALE) $@" + @rm -f $(BINARIES) diff --git a/vendor/github.com/docker/distribution/README.md b/vendor/github.com/docker/distribution/README.md new file mode 100644 index 0000000000..998878850c --- /dev/null +++ b/vendor/github.com/docker/distribution/README.md @@ -0,0 +1,130 @@ +# Distribution + +The Docker toolset to pack, ship, store, and deliver content. + +This repository's main product is the Docker Registry 2.0 implementation +for storing and distributing Docker images. It supersedes the +[docker/docker-registry](https://github.com/docker/docker-registry) +project with a new API design, focused around security and performance. + + + +[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) +[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) + +This repository contains the following components: + +|**Component** |Description | +|--------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | +| **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | +| **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | +| **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/) related just to the registry. 
|
+
+### How does this integrate with Docker engine?
+
+This project should provide an implementation of the V2 API for use in the
+[Docker core project](https://github.com/docker/docker). The API should be
+embeddable and simplify the process of securely pulling and pushing content
+from `docker` daemons.
+
+### What are the long-term goals of the Distribution project?
+
+The _Distribution_ project has the further long-term goal of providing a
+secure tool chain for distributing content. The specifications, APIs and tools
+should be as useful with Docker as they are without.
+
+Our goal is to design a professional-grade and extensible content distribution
+system that allows users to:
+
+* Enjoy an efficient, secure and reliable way to store, manage, package and
+  exchange content
+* Hack/roll their own on top of healthy open-source components
+* Implement their own homemade solutions through good specs and solid
+  extension mechanisms
+
+## More about Registry 2.0
+
+The new registry implementation provides the following benefits:
+
+- faster push and pull
+- new, more efficient implementation
+- simplified deployment
+- pluggable storage backend
+- webhook notifications
+
+For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md).
+
+### Who needs to deploy a registry?
+
+By default, Docker users pull images from Docker's public registry instance.
+[Installing Docker](https://docs.docker.com/engine/installation/) gives users this
+ability. Users can also push images to a repository on Docker's public registry,
+if they have a [Docker Hub](https://hub.docker.com/) account.
+
+For some users and even companies, this default behavior is sufficient. For
+others, it is not.
+
+For example, users with their own software products may want to maintain a
+registry for private, company images. Also, you may wish to deploy your own
+image repository for images used in testing or continuous integration. For these
+use cases and others, [deploying your own registry instance](https://github.com/docker/docker.github.io/blob/master/registry/deploying.md)
+may be the better choice.
+
+### Migration to Registry 2.0
+
+For those who have previously deployed their own registry based on the Registry
+1.0 implementation and wish to deploy a Registry 2.0 while retaining images,
+data migration is required. A tool to assist with migration efforts has been
+created. For more information, see [docker/migrator](https://github.com/docker/migrator).
+
+## Contribute
+
+Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute
+issues, fixes, and patches to this project. If you are contributing code, see
+the instructions for [building a development environment](BUILDING.md).
+
+## Support
+
+If any issues are encountered while using the _Distribution_ project, several
+avenues are available for support:
+
+| **Channel**   | **Details**                                                              |
+|---------------|--------------------------------------------------------------------------|
+| IRC           | #docker-distribution on FreeNode                                         |
+| Issue Tracker | github.com/docker/distribution/issues                                    |
+| Google Groups | https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution |
+| Mailing List  | docker@dockerproject.org                                                 |
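+
+For a concrete sense of the protocol the registry speaks, here is a minimal
+sketch that lists a repository's tags over the V2 API. The endpoint path and
+response shape come from the [API specification](docs/spec/api.md); the
+localhost address and repository name are assumptions for illustration:
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"fmt"
+	"net/http"
+)
+
+func main() {
+	// GET /v2/<name>/tags/list returns {"name": <name>, "tags": [...]}.
+	resp, err := http.Get("http://localhost:5000/v2/myorg/myimage/tags/list")
+	if err != nil {
+		panic(err)
+	}
+	defer resp.Body.Close()
+
+	var body struct {
+		Name string   `json:"name"`
+		Tags []string `json:"tags"`
+	}
+	if err := json.NewDecoder(resp.Body).Decode(&body); err != nil {
+		panic(err)
+	}
+	fmt.Println(body.Name, body.Tags)
+}
+```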
+ + +## License + +This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/github.com/docker/distribution/ROADMAP.md b/vendor/github.com/docker/distribution/ROADMAP.md new file mode 100644 index 0000000000..701127afec --- /dev/null +++ b/vendor/github.com/docker/distribution/ROADMAP.md @@ -0,0 +1,267 @@ +# Roadmap + +The Distribution Project consists of several components, some of which are +still being defined. This document defines the high-level goals of the +project, identifies the current components, and defines the release- +relationship to the Docker Platform. + +* [Distribution Goals](#distribution-goals) +* [Distribution Components](#distribution-components) +* [Project Planning](#project-planning): release-relationship to the Docker Platform. + +This road map is a living document, providing an overview of the goals and +considerations made in respect of the future of the project. + +## Distribution Goals + +- Replace the existing [docker registry](github.com/docker/docker-registry) + implementation as the primary implementation. +- Replace the existing push and pull code in the docker engine with the + distribution package. +- Define a strong data model for distributing docker images +- Provide a flexible distribution tool kit for use in the docker platform +- Unlock new distribution models + +## Distribution Components + +Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming +features and bugfixes for a component will be added to the relevant milestone. If a feature or +bugfix is not part of a milestone, it is currently unscheduled for +implementation. + +* [Registry](#registry) +* [Distribution Package](#distribution-package) + +*** + +### Registry + +The new Docker registry is the main portion of the distribution repository. +Registry 2.0 is the first release of the next-generation registry. This was +primarily focused on implementing the [new registry +API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), +with a focus on security and performance. + +Following from the Distribution project goals above, we have a set of goals +for registry v2 that we would like to follow in the design. New features +should be compared against these goals. + +#### Data Storage and Distribution First + +The registry's first goal is to provide a reliable, consistent storage +location for Docker images. The registry should only provide the minimal +amount of indexing required to fetch image data and no more. + +This means we should be selective in new features and API additions, including +those that may require expensive, ever growing indexes. Requests should be +servable in "constant time". + +#### Content Addressability + +All data objects used in the registry API should be content addressable. +Content identifiers should be secure and verifiable. This provides a secure, +reliable base from which to build more advanced content distribution systems. + +#### Content Agnostic + +In the past, changes to the image format would require large changes in Docker +and the Registry. By decoupling the distribution and image format, we can +allow the formats to progress without having to coordinate between the two. +This means that we should be focused on decoupling Docker from the registry +just as much as decoupling the registry from Docker. 
Such an approach will
+allow us to unlock new distribution models that haven't been possible before.
+
+We can take this further by saying that the new registry should be content
+agnostic. The registry provides a model of names, tags, manifests and content
+addresses, and that model can be used to work with content.
+
+#### Simplicity
+
+The new registry should be closer to a microservice component than its
+predecessor. This means it should have a narrower API and a low number of
+service dependencies. It should be easy to deploy.
+
+This means that other solutions should be explored before changing the API or
+adding extra dependencies. If functionality is required, can it be added as an
+extension or companion service?
+
+#### Extensibility
+
+The registry should provide extension points to add functionality while
+keeping its core scope narrow.
+
+Features like search, indexing, synchronization and registry explorers fall
+into this category. No such feature should be added unless we've found it
+impossible to do through an extension.
+
+#### Active Feature Discussions
+
+The following are feature discussions that are currently active.
+
+If you don't see your favorite, unimplemented feature, feel free to contact us
+via IRC or the mailing list and we can talk about adding it. The goal here is
+to make sure that new features go through a rigid design process before
+landing in the registry.
+
+##### Proxying to other Registries
+
+A _pull-through caching_ mode exists for the registry, but is restricted from
+within the docker client to only mirror the official Docker Hub. This functionality
+can be expanded when image provenance has been specified and implemented in the
+distribution project.
+
+##### Metadata storage
+
+Metadata for the registry is currently stored with the manifest and layer data on
+the storage backend. While this is a big win for simplicity and reliably maintaining
+state, it comes with the cost of consistency and high latency. The mutable registry
+metadata operations should be abstracted behind an API which will allow ACID-compliant
+storage systems to handle metadata.
+
+##### Peer to Peer transfer
+
+Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit
+
+##### Indexing, Search and Discovery
+
+The original registry provided some implementation of search for use with
+private registries. Support has been elided from V2 since we'd like to
+decouple search functionality from the registry. This makes the registry
+simpler to deploy, especially in use cases where search is not needed, and
+lets us decouple the image format from the registry.
+
+There are explorations into using the catalog API and notification system to
+build external indexes. The current line of thought is that we will define a
+common search API to index and query docker images. Such a system could be run
+as a companion to a registry or set of registries to power discovery.
+
+The main issue with search and discovery is that there are so many ways to
+accomplish it. There are two aspects to this project. The first is deciding
+how it will be done, including an API definition that can work with changing
+data formats. The second is the process of integrating with `docker search`.
+We expect that someone will attempt to address the problem with the existing
+tools, then either propose the result as a standard search API or use it to
+inform a standardization process.
Once this has been explored, we can integrate with the docker client.
+
+Please see the following for more detail:
+
+- https://github.com/docker/distribution/issues/206
+
+##### Deletes
+
+> __NOTE:__ Deletes are a much-asked-for feature. Before requesting this
+feature or participating in discussion, we ask that you read this section in
+full and understand the problems behind deletes.
+
+While, at first glance, implementing deletes seems simple, there are a number
+of mitigating factors that make many solutions not ideal or even pathological
+in the context of a registry. The following paragraphs discuss the background
+and approaches that could be applied to arrive at a solution.
+
+The goal of deletes in any system is to remove unused or unneeded data. Only
+data requested for deletion should be removed and no other data. Removing
+unintended data is worse than _not_ removing data that was requested for
+removal, but ideally both are supported. Generally, according to this rule, we
+err on the side of holding data longer than needed, ensuring that it is only
+removed when we can be certain that it can be removed. With the current
+behavior, we opt to hold onto the data forever, ensuring that data cannot be
+incorrectly removed.
+
+To understand the problems with implementing deletes, one must understand the
+data model. All registry data is stored in a filesystem layout, implemented on
+a "storage driver", effectively a _virtual file system_ (VFS). The storage
+system must assume that this VFS layer will be eventually consistent and has
+poor read-after-write consistency, since this is the lowest common denominator
+among the storage drivers. This is mitigated by writing values in
+reverse-dependent order, but makes wider transactional operations unsafe.
+
+Layered on the VFS model is a content-addressable _directed acyclic graph_
+(DAG) made up of blobs. Manifests reference layers. Tags reference manifests.
+Since the same data can be referenced by multiple manifests, we only store
+data once, even if it is in different repositories. Thus, we have a set of
+blobs, referenced by tags and manifests. If we want to delete a blob, we need
+to be certain that it is no longer referenced by another manifest or tag. When
+we delete a manifest, we can also try to delete the referenced blobs. Deciding
+whether or not a blob has an active reference is the crux of the problem.
+
+Conceptually, deleting a manifest and its resources is quite simple. Just find
+all the manifests, enumerate the referenced blobs, and delete the blobs not in
+that set. An astute observer will recognize this as a garbage collection
+problem. As with garbage collection in programming languages, this is very
+simple when one always has a consistent view. When one adds parallelism and an
+inconsistent view of data, it becomes very challenging.
+
+A simple example can demonstrate this. Let's say we are deleting a manifest
+_A_ in one process. We scan the manifest and decide that all the blobs are
+ready for deletion. Concurrently, we have another process accepting a new
+manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_
+is accepted and all the blobs are considered present, so the operation
+proceeds. The original process then deletes the referenced blobs, assuming
+they were unreferenced. The manifest _B_, which we thought had all of its data
+present, can no longer be served by the registry, since the dependent data has
+been deleted.
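+
+To make the race concrete, here is a toy, in-memory mark-and-sweep pass over
+the blob DAG described above. The types are hypothetical simplifications for
+illustration, not the registry's actual storage driver API:
+
+```go
+package main
+
+import "fmt"
+
+// manifest references blobs by digest (plain strings for simplicity).
+type manifest struct {
+	name  string
+	blobs []string
+}
+
+// markReachable records every blob referenced by at least one manifest.
+func markReachable(manifests []manifest) map[string]bool {
+	reachable := make(map[string]bool)
+	for _, m := range manifests {
+		for _, b := range m.blobs {
+			reachable[b] = true
+		}
+	}
+	return reachable
+}
+
+// sweep returns the blobs no manifest references. This is only safe against
+// a frozen, consistent view: a manifest accepted between mark and sweep can
+// reference a blob this pass is about to delete.
+func sweep(allBlobs []string, reachable map[string]bool) []string {
+	var garbage []string
+	for _, b := range allBlobs {
+		if !reachable[b] {
+			garbage = append(garbage, b)
+		}
+	}
+	return garbage
+}
+
+func main() {
+	blobs := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}
+	manifests := []manifest{{name: "repo/a", blobs: []string{"sha256:aaa"}}}
+	reachable := markReachable(manifests)
+	// A concurrent writer could add a manifest referencing sha256:bbb right
+	// here; the sweep below would still report it as garbage.
+	fmt.Println("unreferenced:", sweep(blobs, reachable))
+}
+```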
+
+Deleting data from the registry safely requires some way to coordinate this
+operation. The following approaches are being considered:
+
+- _Reference Counting_ - Maintain a count of references to each blob. This is
+  challenging for a number of reasons: (1) maintaining a consistent consensus
+  of reference counts across a set of registries and (2) building the initial
+  list of reference counts for an existing registry. These challenges can be
+  met with a consensus protocol like Paxos or Raft in the first case and a
+  necessary but simple scan in the second.
+- _Lock the World GC_ - Halt all writes to the data store. Walk the data store
+  and find all blob references. Delete all unreferenced blobs. This approach
+  is very simple but requires disabling writes for a period of time while the
+  service reads all data. This is slow and expensive but very accurate and
+  effective.
+- _Generational GC_ - Do something similar to above but instead of blocking
+  writes, writes are sent to another storage backend while reads are broadcast
+  to the new and old backends. GC is then performed on the read-only portion.
+  Because writes land in the new backend, the data in the read-only section
+  can be safely deleted. The main drawbacks of this approach are complexity
+  and coordination.
+- _Centralized Oracle_ - Using a centralized, transactional database, we can
+  know exactly which data is referenced at any given time. This avoids the
+  coordination problem by managing this data in a single location. We trade
+  off metadata scalability for simplicity and performance. This is a very good
+  option for most registry deployments, though it would create a bottleneck
+  for registry metadata. However, metadata is generally not the main
+  bottleneck when serving images.
+
+Please let us know if other solutions exist that we have yet to enumerate.
+Note that for any approach, implementation is a massive consideration. For
+example, a mark-sweep-based solution may seem simple, but the amount of work
+in coordination may offset the extra work it might take to build a
+_Centralized Oracle_. We'll accept proposals for any solution, but please
+coordinate with us before dropping code.
+
+At this time, we have traded off simplicity and ease of deployment for disk
+space. Simplicity and ease of deployment tend to reduce developer involvement,
+which is currently the most expensive resource in software engineering. Taking
+on any solution for deletes will greatly affect these factors, trading off
+very cheap disk space for a complex deployment and operational story.
+
+Please see the following issues for more detail:
+
+- https://github.com/docker/distribution/issues/422
+- https://github.com/docker/distribution/issues/461
+- https://github.com/docker/distribution/issues/462
+
+### Distribution Package
+
+At its core, the Distribution Project is a set of Go packages that make up
+Distribution Components. At this time, most of these packages make up the
+Registry implementation.
+
+The package itself is considered unstable. If you're using it, please take care
+to vendor the dependent version.
+
+For feature additions, please see the Registry section. In the future, we may
+break out a separate Roadmap for distribution-specific features that apply to
+more than just the registry.
+
+***
+
+### Project Planning
+
+An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap.
[Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress. + diff --git a/vendor/github.com/docker/distribution/blobs.go b/vendor/github.com/docker/distribution/blobs.go new file mode 100644 index 0000000000..c0e9261be9 --- /dev/null +++ b/vendor/github.com/docker/distribution/blobs.go @@ -0,0 +1,265 @@ +package distribution + +import ( + "context" + "errors" + "fmt" + "io" + "net/http" + "time" + + "github.com/docker/distribution/reference" + "github.com/opencontainers/go-digest" + "github.com/opencontainers/image-spec/specs-go/v1" +) + +var ( + // ErrBlobExists returned when blob already exists + ErrBlobExists = errors.New("blob exists") + + // ErrBlobDigestUnsupported when blob digest is an unsupported version. + ErrBlobDigestUnsupported = errors.New("unsupported blob digest") + + // ErrBlobUnknown when blob is not found. + ErrBlobUnknown = errors.New("unknown blob") + + // ErrBlobUploadUnknown returned when upload is not found. + ErrBlobUploadUnknown = errors.New("blob upload unknown") + + // ErrBlobInvalidLength returned when the blob has an expected length on + // commit, meaning mismatched with the descriptor or an invalid value. + ErrBlobInvalidLength = errors.New("blob invalid length") +) + +// ErrBlobInvalidDigest returned when digest check fails. +type ErrBlobInvalidDigest struct { + Digest digest.Digest + Reason error +} + +func (err ErrBlobInvalidDigest) Error() string { + return fmt.Sprintf("invalid digest for referenced layer: %v, %v", + err.Digest, err.Reason) +} + +// ErrBlobMounted returned when a blob is mounted from another repository +// instead of initiating an upload session. +type ErrBlobMounted struct { + From reference.Canonical + Descriptor Descriptor +} + +func (err ErrBlobMounted) Error() string { + return fmt.Sprintf("blob mounted from: %v to: %v", + err.From, err.Descriptor) +} + +// Descriptor describes targeted content. Used in conjunction with a blob +// store, a descriptor can be used to fetch, store and target any kind of +// blob. The struct also describes the wire protocol format. Fields should +// only be added but never changed. +type Descriptor struct { + // MediaType describe the type of the content. All text based formats are + // encoded as utf-8. + MediaType string `json:"mediaType,omitempty"` + + // Size in bytes of content. + Size int64 `json:"size,omitempty"` + + // Digest uniquely identifies the content. A byte stream can be verified + // against this digest. + Digest digest.Digest `json:"digest,omitempty"` + + // URLs contains the source URLs of this content. + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // This should only be used when referring to a manifest. + Platform *v1.Platform `json:"platform,omitempty"` + + // NOTE: Before adding a field here, please ensure that all + // other options have been exhausted. Much of the type relationships + // depend on the simplicity of this type. +} + +// Descriptor returns the descriptor, to make it satisfy the Describable +// interface. 
Note that implementations of Describable are generally objects +// which can be described, not simply descriptors; this exception is in place +// to make it more convenient to pass actual descriptors to functions that +// expect Describable objects. +func (d Descriptor) Descriptor() Descriptor { + return d +} + +// BlobStatter makes blob descriptors available by digest. The service may +// provide a descriptor of a different digest if the provided digest is not +// canonical. +type BlobStatter interface { + // Stat provides metadata about a blob identified by the digest. If the + // blob is unknown to the describer, ErrBlobUnknown will be returned. + Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) +} + +// BlobDeleter enables deleting blobs from storage. +type BlobDeleter interface { + Delete(ctx context.Context, dgst digest.Digest) error +} + +// BlobEnumerator enables iterating over blobs from storage +type BlobEnumerator interface { + Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error +} + +// BlobDescriptorService manages metadata about a blob by digest. Most +// implementations will not expose such an interface explicitly. Such mappings +// should be maintained by interacting with the BlobIngester. Hence, this is +// left off of BlobService and BlobStore. +type BlobDescriptorService interface { + BlobStatter + + // SetDescriptor assigns the descriptor to the digest. The provided digest and + // the digest in the descriptor must map to identical content but they may + // differ on their algorithm. The descriptor must have the canonical + // digest of the content and the digest algorithm must match the + // annotators canonical algorithm. + // + // Such a facility can be used to map blobs between digest domains, with + // the restriction that the algorithm of the descriptor must match the + // canonical algorithm (ie sha256) of the annotator. + SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error + + // Clear enables descriptors to be unlinked + Clear(ctx context.Context, dgst digest.Digest) error +} + +// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. +type BlobDescriptorServiceFactory interface { + BlobAccessController(svc BlobDescriptorService) BlobDescriptorService +} + +// ReadSeekCloser is the primary reader type for blob data, combining +// io.ReadSeeker with io.Closer. +type ReadSeekCloser interface { + io.ReadSeeker + io.Closer +} + +// BlobProvider describes operations for getting blob data. +type BlobProvider interface { + // Get returns the entire blob identified by digest along with the descriptor. + Get(ctx context.Context, dgst digest.Digest) ([]byte, error) + + // Open provides a ReadSeekCloser to the blob identified by the provided + // descriptor. If the blob is not known to the service, an error will be + // returned. + Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) +} + +// BlobServer can serve blobs via http. +type BlobServer interface { + // ServeBlob attempts to serve the blob, identified by dgst, via http. The + // service may decide to redirect the client elsewhere or serve the data + // directly. + // + // This handler only issues successful responses, such as 2xx or 3xx, + // meaning it serves data or issues a redirect. If the blob is not + // available, an error will be returned and the caller may still issue a + // response. + // + // The implementation may serve the same blob from a different digest + // domain. 
The appropriate headers will be set for the blob, unless they
+	// have already been set by the caller.
+	ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error
+}
+
+// BlobIngester ingests blob data.
+type BlobIngester interface {
+	// Put inserts the content p into the blob service, returning a descriptor
+	// or an error.
+	Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error)
+
+	// Create allocates a new blob writer to add a blob to this service. The
+	// returned handle can be written to and later resumed using an opaque
+	// identifier. With this approach, one can Close and Resume a BlobWriter
+	// multiple times until the BlobWriter is committed or cancelled.
+	Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error)
+
+	// Resume attempts to resume a write to a blob, identified by an id.
+	Resume(ctx context.Context, id string) (BlobWriter, error)
+}
+
+// BlobCreateOption is a general extensible function argument for blob creation
+// methods. A BlobIngester may choose to honor any or none of the given
+// BlobCreateOptions, which can be specific to the implementation of the
+// BlobIngester receiving them.
+// TODO (brianbland): unify this with ManifestServiceOption in the future
+type BlobCreateOption interface {
+	Apply(interface{}) error
+}
+
+// CreateOptions is a collection of blob creation modifiers relevant to general
+// blob storage intended to be configured by the BlobCreateOption.Apply method.
+type CreateOptions struct {
+	Mount struct {
+		ShouldMount bool
+		From        reference.Canonical
+		// Stat allows passing a precalculated descriptor to link and return.
+		// Blob access check will be skipped if set.
+		Stat *Descriptor
+	}
+}
+
+// BlobWriter provides a handle for inserting data into a blob store.
+// Instances should be obtained from BlobWriteService.Writer and
+// BlobWriteService.Resume. If supported by the store, a writer can be
+// recovered with the id.
+type BlobWriter interface {
+	io.WriteCloser
+	io.ReaderFrom
+
+	// Size returns the number of bytes written to this blob.
+	Size() int64
+
+	// ID returns the identifier for this writer. The ID can be used with the
+	// Blob service to later resume the write.
+	ID() string
+
+	// StartedAt returns the time this blob write was started.
+	StartedAt() time.Time
+
+	// Commit completes the blob writer process. The content is verified
+	// against the provided provisional descriptor, which may result in an
+	// error. Depending on the implementation, written data may be validated
+	// against the provisional descriptor fields. If MediaType is not present,
+	// the implementation may reject the commit or assign
+	// "application/octet-stream" to the blob. The returned descriptor may
+	// have a different digest depending on the blob store, referred to as
+	// the canonical descriptor.
+	Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error)
+
+	// Cancel ends the blob write without storing any data and frees any
+	// associated resources. Any data written thus far will be lost. Cancel
+	// implementations should allow multiple calls, even after a commit, that
+	// result in a no-op. This allows use of Cancel in a defer statement,
+	// increasing the assurance that it is correctly called.
+	Cancel(ctx context.Context) error
+}
+
+// BlobService combines the operations to access, read and write blobs. This
+// can be used to describe remote blob services.
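+//
+// A hypothetical usage sketch combining the embedded interfaces:
+//
+//	desc, err := blobs.Stat(ctx, dgst)            // metadata only (BlobStatter)
+//	p, err := blobs.Get(ctx, dgst)                // full contents (BlobProvider)
+//	desc, err = blobs.Put(ctx, desc.MediaType, p) // write it back (BlobIngester)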
+type BlobService interface {
+	BlobStatter
+	BlobProvider
+	BlobIngester
+}
+
+// BlobStore represents the entire suite of blob-related operations. Such an
+// implementation can access, read, write, delete and serve blobs.
+type BlobStore interface {
+	BlobService
+	BlobServer
+	BlobDeleter
+}
diff --git a/vendor/github.com/docker/distribution/digestset/set.go b/vendor/github.com/docker/distribution/digestset/set.go
new file mode 100644
index 0000000000..71327dca72
--- /dev/null
+++ b/vendor/github.com/docker/distribution/digestset/set.go
@@ -0,0 +1,247 @@
+package digestset
+
+import (
+	"errors"
+	"sort"
+	"strings"
+	"sync"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+var (
+	// ErrDigestNotFound is used when a matching digest
+	// could not be found in a set.
+	ErrDigestNotFound = errors.New("digest not found")
+
+	// ErrDigestAmbiguous is used when multiple digests
+	// are found in a set. None of the matching digests
+	// should be considered valid matches.
+	ErrDigestAmbiguous = errors.New("ambiguous digest string")
+)
+
+// Set is used to hold a unique set of digests which
+// may be easily referenced by a string representation
+// of the digest as well as a short representation.
+// The uniqueness of the short representation is based on other
+// digests in the set. If digests are omitted from this set,
+// collisions in a larger set may not be detected; therefore it
+// is important to always do short representation lookups on
+// the complete set of digests. To mitigate collisions, an
+// appropriately long short code should be used.
+type Set struct {
+	mutex   sync.RWMutex
+	entries digestEntries
+}
+
+// NewSet creates an empty set of digests
+// which may have digests added.
+func NewSet() *Set {
+	return &Set{
+		entries: digestEntries{},
+	}
+}
+
+// checkShortMatch checks whether two digests match as either whole
+// values or short values. This function does not test equality,
+// rather whether the second value could match against the first
+// value.
+func checkShortMatch(alg digest.Algorithm, hex, shortAlg, shortHex string) bool {
+	if len(hex) == len(shortHex) {
+		if hex != shortHex {
+			return false
+		}
+		if len(shortAlg) > 0 && string(alg) != shortAlg {
+			return false
+		}
+	} else if !strings.HasPrefix(hex, shortHex) {
+		return false
+	} else if len(shortAlg) > 0 && string(alg) != shortAlg {
+		return false
+	}
+	return true
+}
+
+// Lookup looks for a digest matching the given string representation.
+// If no digest could be found, ErrDigestNotFound will be returned
+// with an empty digest value. If multiple matches are found,
+// ErrDigestAmbiguous will be returned with an empty digest value.
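+//
+// For example (hypothetical short prefix):
+//
+//	dgst, err := s.Lookup("b3263")
+//	// err == ErrDigestNotFound if nothing in the set matches the prefix;
+//	// err == ErrDigestAmbiguous if more than one digest does.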
+func (dst *Set) Lookup(d string) (digest.Digest, error) { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + if len(dst.entries) == 0 { + return "", ErrDigestNotFound + } + var ( + searchFunc func(int) bool + alg digest.Algorithm + hex string + ) + dgst, err := digest.Parse(d) + if err == digest.ErrDigestInvalidFormat { + hex = d + searchFunc = func(i int) bool { + return dst.entries[i].val >= d + } + } else { + hex = dgst.Hex() + alg = dgst.Algorithm() + searchFunc = func(i int) bool { + if dst.entries[i].val == hex { + return dst.entries[i].alg >= alg + } + return dst.entries[i].val >= hex + } + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { + return "", ErrDigestNotFound + } + if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { + return dst.entries[idx].digest, nil + } + if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { + return "", ErrDigestAmbiguous + } + + return dst.entries[idx].digest, nil +} + +// Add adds the given digest to the set. An error will be returned +// if the given digest is invalid. If the digest already exists in the +// set, this operation will be a no-op. +func (dst *Set) Add(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + if idx == len(dst.entries) { + dst.entries = append(dst.entries, entry) + return nil + } else if dst.entries[idx].digest == d { + return nil + } + + entries := append(dst.entries, nil) + copy(entries[idx+1:], entries[idx:len(entries)-1]) + entries[idx] = entry + dst.entries = entries + return nil +} + +// Remove removes the given digest from the set. An err will be +// returned if the given digest is invalid. If the digest does +// not exist in the set, this operation will be a no-op. +func (dst *Set) Remove(d digest.Digest) error { + if err := d.Validate(); err != nil { + return err + } + dst.mutex.Lock() + defer dst.mutex.Unlock() + entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} + searchFunc := func(i int) bool { + if dst.entries[i].val == entry.val { + return dst.entries[i].alg >= entry.alg + } + return dst.entries[i].val >= entry.val + } + idx := sort.Search(len(dst.entries), searchFunc) + // Not found if idx is after or value at idx is not digest + if idx == len(dst.entries) || dst.entries[idx].digest != d { + return nil + } + + entries := dst.entries + copy(entries[idx:], entries[idx+1:]) + entries = entries[:len(entries)-1] + dst.entries = entries + + return nil +} + +// All returns all the digests in the set +func (dst *Set) All() []digest.Digest { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + retValues := make([]digest.Digest, len(dst.entries)) + for i := range dst.entries { + retValues[i] = dst.entries[i].digest + } + + return retValues +} + +// ShortCodeTable returns a map of Digest to unique short codes. The +// length represents the minimum value, the maximum length may be the +// entire value of digest if uniqueness cannot be achieved without the +// full value. This function will attempt to make short codes as short +// as possible to be unique. 
+func ShortCodeTable(dst *Set, length int) map[digest.Digest]string { + dst.mutex.RLock() + defer dst.mutex.RUnlock() + m := make(map[digest.Digest]string, len(dst.entries)) + l := length + resetIdx := 0 + for i := 0; i < len(dst.entries); i++ { + var short string + extended := true + for extended { + extended = false + if len(dst.entries[i].val) <= l { + short = dst.entries[i].digest.String() + } else { + short = dst.entries[i].val[:l] + for j := i + 1; j < len(dst.entries); j++ { + if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { + if j > resetIdx { + resetIdx = j + } + extended = true + } else { + break + } + } + if extended { + l++ + } + } + } + m[dst.entries[i].digest] = short + if i >= resetIdx { + l = length + } + } + return m +} + +type digestEntry struct { + alg digest.Algorithm + val string + digest digest.Digest +} + +type digestEntries []*digestEntry + +func (d digestEntries) Len() int { + return len(d) +} + +func (d digestEntries) Less(i, j int) bool { + if d[i].val != d[j].val { + return d[i].val < d[j].val + } + return d[i].alg < d[j].alg +} + +func (d digestEntries) Swap(i, j int) { + d[i], d[j] = d[j], d[i] +} diff --git a/vendor/github.com/docker/distribution/doc.go b/vendor/github.com/docker/distribution/doc.go new file mode 100644 index 0000000000..bdd8cb708e --- /dev/null +++ b/vendor/github.com/docker/distribution/doc.go @@ -0,0 +1,7 @@ +// Package distribution will define the interfaces for the components of +// docker distribution. The goal is to allow users to reliably package, ship +// and store content related to docker images. +// +// This is currently a work in progress. More details are available in the +// README.md. +package distribution diff --git a/vendor/github.com/docker/distribution/errors.go b/vendor/github.com/docker/distribution/errors.go new file mode 100644 index 0000000000..8e0b788d6c --- /dev/null +++ b/vendor/github.com/docker/distribution/errors.go @@ -0,0 +1,119 @@ +package distribution + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +// ErrAccessDenied is returned when an access to a requested resource is +// denied. +var ErrAccessDenied = errors.New("access denied") + +// ErrManifestNotModified is returned when a conditional manifest GetByTag +// returns nil due to the client indicating it has the latest version +var ErrManifestNotModified = errors.New("manifest not modified") + +// ErrUnsupported is returned when an unimplemented or unsupported action is +// performed +var ErrUnsupported = errors.New("operation unsupported") + +// ErrSchemaV1Unsupported is returned when a client tries to upload a schema v1 +// manifest but the registry is configured to reject it +var ErrSchemaV1Unsupported = errors.New("manifest schema v1 unsupported") + +// ErrTagUnknown is returned if the given tag is not known by the tag service +type ErrTagUnknown struct { + Tag string +} + +func (err ErrTagUnknown) Error() string { + return fmt.Sprintf("unknown tag=%s", err.Tag) +} + +// ErrRepositoryUnknown is returned if the named repository is not known by +// the registry. +type ErrRepositoryUnknown struct { + Name string +} + +func (err ErrRepositoryUnknown) Error() string { + return fmt.Sprintf("unknown repository name=%s", err.Name) +} + +// ErrRepositoryNameInvalid should be used to denote an invalid repository +// name. Reason may set, indicating the cause of invalidity. 
+type ErrRepositoryNameInvalid struct { + Name string + Reason error +} + +func (err ErrRepositoryNameInvalid) Error() string { + return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) +} + +// ErrManifestUnknown is returned if the manifest is not known by the +// registry. +type ErrManifestUnknown struct { + Name string + Tag string +} + +func (err ErrManifestUnknown) Error() string { + return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) +} + +// ErrManifestUnknownRevision is returned when a manifest cannot be found by +// revision within a repository. +type ErrManifestUnknownRevision struct { + Name string + Revision digest.Digest +} + +func (err ErrManifestUnknownRevision) Error() string { + return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) +} + +// ErrManifestUnverified is returned when the registry is unable to verify +// the manifest. +type ErrManifestUnverified struct{} + +func (ErrManifestUnverified) Error() string { + return "unverified manifest" +} + +// ErrManifestVerification provides a type to collect errors encountered +// during manifest verification. Currently, it accepts errors of all types, +// but it may be narrowed to those involving manifest verification. +type ErrManifestVerification []error + +func (errs ErrManifestVerification) Error() string { + var parts []string + for _, err := range errs { + parts = append(parts, err.Error()) + } + + return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ",")) +} + +// ErrManifestBlobUnknown returned when a referenced blob cannot be found. +type ErrManifestBlobUnknown struct { + Digest digest.Digest +} + +func (err ErrManifestBlobUnknown) Error() string { + return fmt.Sprintf("unknown blob %v on manifest", err.Digest) +} + +// ErrManifestNameInvalid should be used to denote an invalid manifest +// name. Reason may set, indicating the cause of invalidity. +type ErrManifestNameInvalid struct { + Name string + Reason error +} + +func (err ErrManifestNameInvalid) Error() string { + return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason) +} diff --git a/vendor/github.com/docker/distribution/manifests.go b/vendor/github.com/docker/distribution/manifests.go new file mode 100644 index 0000000000..1816baea1d --- /dev/null +++ b/vendor/github.com/docker/distribution/manifests.go @@ -0,0 +1,125 @@ +package distribution + +import ( + "context" + "fmt" + "mime" + + "github.com/opencontainers/go-digest" +) + +// Manifest represents a registry object specifying a set of +// references and an optional target +type Manifest interface { + // References returns a list of objects which make up this manifest. + // A reference is anything which can be represented by a + // distribution.Descriptor. These can consist of layers, resources or other + // manifests. + // + // While no particular order is required, implementations should return + // them from highest to lowest priority. For example, one might want to + // return the base layer before the top layer. + References() []Descriptor + + // Payload provides the serialized format of the manifest, in addition to + // the media type. + Payload() (mediaType string, payload []byte, err error) +} + +// ManifestBuilder creates a manifest allowing one to include dependencies. +// Instances can be obtained from a version-specific manifest package. Manifest +// specific data is passed into the function which creates the builder. 
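+//
+// A typical flow might look like this (hypothetical):
+//
+//	_ = builder.AppendReference(layerDesc) // add dependencies, base to head
+//	m, err := builder.Build(ctx)           // assemble the final manifest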
+type ManifestBuilder interface {
+	// Build creates the manifest from this builder.
+	Build(ctx context.Context) (Manifest, error)
+
+	// References returns a list of objects which have been added to this
+	// builder. The dependencies are returned in the order they were added,
+	// which should be from base to head.
+	References() []Descriptor
+
+	// AppendReference includes the given object in the manifest after any
+	// existing dependencies. If the add fails, such as when adding an
+	// unsupported dependency, an error may be returned.
+	//
+	// The destination of the reference is dependent on the manifest type and
+	// the dependency type.
+	AppendReference(dependency Describable) error
+}
+
+// ManifestService describes operations on image manifests.
+type ManifestService interface {
+	// Exists returns true if the manifest exists.
+	Exists(ctx context.Context, dgst digest.Digest) (bool, error)
+
+	// Get retrieves the manifest specified by the given digest
+	Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error)
+
+	// Put creates or updates the given manifest returning the manifest digest
+	Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error)
+
+	// Delete removes the manifest specified by the given digest. Deleting
+	// a manifest that doesn't exist will return ErrManifestNotFound
+	Delete(ctx context.Context, dgst digest.Digest) error
+}
+
+// ManifestEnumerator enables iterating over manifests
+type ManifestEnumerator interface {
+	// Enumerate calls ingester for each manifest.
+	Enumerate(ctx context.Context, ingester func(digest.Digest) error) error
+}
+
+// Describable is an interface for descriptors
+type Describable interface {
+	Descriptor() Descriptor
+}
+
+// ManifestMediaTypes returns the supported media types for manifests.
+func ManifestMediaTypes() (mediaTypes []string) {
+	for t := range mappings {
+		if t != "" {
+			mediaTypes = append(mediaTypes, t)
+		}
+	}
+	return
+}
+
+// UnmarshalFunc implements manifest unmarshalling for a given MediaType
+type UnmarshalFunc func([]byte) (Manifest, Descriptor, error)
+
+var mappings = make(map[string]UnmarshalFunc, 0)
+
+// UnmarshalManifest looks up manifest unmarshal functions based on
+// MediaType
+func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) {
+	// Need to look up by the actual media type, not the raw contents of
+	// the header. Strip semicolons and anything following them.
+	var mediaType string
+	if ctHeader != "" {
+		var err error
+		mediaType, _, err = mime.ParseMediaType(ctHeader)
+		if err != nil {
+			return nil, Descriptor{}, err
+		}
+	}
+
+	unmarshalFunc, ok := mappings[mediaType]
+	if !ok {
+		unmarshalFunc, ok = mappings[""]
+		if !ok {
+			return nil, Descriptor{}, fmt.Errorf("unsupported manifest media type and no default available: %s", mediaType)
+		}
+	}
+
+	return unmarshalFunc(p)
+}
+
+// RegisterManifestSchema registers an UnmarshalFunc for a given schema type.
This +// should be called from specific +func RegisterManifestSchema(mediaType string, u UnmarshalFunc) error { + if _, ok := mappings[mediaType]; ok { + return fmt.Errorf("manifest media type registration would overwrite existing: %s", mediaType) + } + mappings[mediaType] = u + return nil +} diff --git a/vendor/github.com/docker/distribution/metrics/prometheus.go b/vendor/github.com/docker/distribution/metrics/prometheus.go new file mode 100644 index 0000000000..b5a5321448 --- /dev/null +++ b/vendor/github.com/docker/distribution/metrics/prometheus.go @@ -0,0 +1,13 @@ +package metrics + +import "github.com/docker/go-metrics" + +const ( + // NamespacePrefix is the namespace of prometheus metrics + NamespacePrefix = "registry" +) + +var ( + // StorageNamespace is the prometheus namespace of blob/cache related operations + StorageNamespace = metrics.NewNamespace(NamespacePrefix, "storage", nil) +) diff --git a/vendor/github.com/docker/distribution/reference/helpers.go b/vendor/github.com/docker/distribution/reference/helpers.go new file mode 100644 index 0000000000..978df7eabb --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/helpers.go @@ -0,0 +1,42 @@ +package reference + +import "path" + +// IsNameOnly returns true if reference only contains a repo name. +func IsNameOnly(ref Named) bool { + if _, ok := ref.(NamedTagged); ok { + return false + } + if _, ok := ref.(Canonical); ok { + return false + } + return true +} + +// FamiliarName returns the familiar name string +// for the given named, familiarizing if needed. +func FamiliarName(ref Named) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().Name() + } + return ref.Name() +} + +// FamiliarString returns the familiar string representation +// for the given reference, familiarizing if needed. +func FamiliarString(ref Reference) string { + if nn, ok := ref.(normalizedNamed); ok { + return nn.Familiar().String() + } + return ref.String() +} + +// FamiliarMatch reports whether ref matches the specified pattern. +// See https://godoc.org/path#Match for supported patterns. +func FamiliarMatch(pattern string, ref Reference) (bool, error) { + matched, err := path.Match(pattern, FamiliarString(ref)) + if namedRef, isNamed := ref.(Named); isNamed && !matched { + matched, _ = path.Match(pattern, FamiliarName(namedRef)) + } + return matched, err +} diff --git a/vendor/github.com/docker/distribution/reference/normalize.go b/vendor/github.com/docker/distribution/reference/normalize.go new file mode 100644 index 0000000000..2d71fc5e9f --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/normalize.go @@ -0,0 +1,170 @@ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/docker/distribution/digestset" + "github.com/opencontainers/go-digest" +) + +var ( + legacyDefaultDomain = "index.docker.io" + defaultDomain = "docker.io" + officialRepoName = "library" + defaultTag = "latest" +) + +// normalizedNamed represents a name which has been +// normalized and has a familiar form. A familiar name +// is what is used in Docker UI. An example normalized +// name is "docker.io/library/ubuntu" and corresponding +// familiar name of "ubuntu". +type normalizedNamed interface { + Named + Familiar() Named +} + +// ParseNormalizedNamed parses a string into a named reference +// transforming a familiar name from Docker UI to a fully +// qualified reference. 
If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+	if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+		return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+	}
+	domain, remainder := splitDockerDomain(s)
+	var remoteName string
+	if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+		remoteName = remainder[:tagSep]
+	} else {
+		remoteName = remainder
+	}
+	if strings.ToLower(remoteName) != remoteName {
+		return nil, errors.New("invalid reference format: repository name must be lowercase")
+	}
+
+	ref, err := Parse(domain + "/" + remainder)
+	if err != nil {
+		return nil, err
+	}
+	named, isNamed := ref.(Named)
+	if !isNamed {
+		return nil, fmt.Errorf("reference %s has no name", ref.String())
+	}
+	return named, nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+	i := strings.IndexRune(name, '/')
+	if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+		domain, remainder = defaultDomain, name
+	} else {
+		domain, remainder = name[:i], name[i+1:]
+	}
+	if domain == legacyDefaultDomain {
+		domain = defaultDomain
+	}
+	if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+		remainder = officialRepoName + "/" + remainder
+	}
+	return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository {
+	repo := repository{
+		domain: named.Domain(),
+		path:   named.Path(),
+	}
+
+	if repo.domain == defaultDomain {
+		repo.domain = ""
+		// Handle official repositories which have the pattern "library/<official repo name>"
+		if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+			repo.path = split[1]
+		}
+	}
+	return repo
+}
+
+func (r reference) Familiar() Named {
+	return reference{
+		namedRepository: familiarizeName(r.namedRepository),
+		tag:             r.tag,
+		digest:          r.digest,
+	}
+}
+
+func (r repository) Familiar() Named {
+	return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+	return taggedReference{
+		namedRepository: familiarizeName(t.namedRepository),
+		tag:             t.tag,
+	}
+}
+
+func (c canonicalReference) Familiar() Named {
+	return canonicalReference{
+		namedRepository: familiarizeName(c.namedRepository),
+		digest:          c.digest,
+	}
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+	if IsNameOnly(ref) {
+		namedTagged, err := WithTag(ref, defaultTag)
+		if err != nil {
+			// Default tag must be valid, to create a NamedTagged
+			// type with non-validated input the WithTag function
+			// should be used instead
+			panic(err)
+		}
+		return namedTagged
+	}
+	return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
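// ---- editor's note: usage sketch, not part of the vendored file ----
// The normalization round trip implemented above, using only exported
// helpers from this package:
//
//	named, _ := reference.ParseNormalizedNamed("redis")
//	named.Name()                      // "docker.io/library/redis"
//	withTag := reference.TagNameOnly(named)
//	withTag.String()                  // "docker.io/library/redis:latest"
//	reference.FamiliarString(withTag) // "redis:latest"
// ---- end editor's note ----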
+func ParseAnyReference(ref string) (Reference, error) { + if ok := anchoredIdentifierRegexp.MatchString(ref); ok { + return digestReference("sha256:" + ref), nil + } + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + + return ParseNormalizedNamed(ref) +} + +// ParseAnyReferenceWithSet parses a reference string as a possible short +// identifier to be matched in a digest set, a full digest, or familiar name. +func ParseAnyReferenceWithSet(ref string, ds *digestset.Set) (Reference, error) { + if ok := anchoredShortIdentifierRegexp.MatchString(ref); ok { + dgst, err := ds.Lookup(ref) + if err == nil { + return digestReference(dgst), nil + } + } else { + if dgst, err := digest.Parse(ref); err == nil { + return digestReference(dgst), nil + } + } + + return ParseNormalizedNamed(ref) +} diff --git a/vendor/github.com/docker/distribution/reference/reference.go b/vendor/github.com/docker/distribution/reference/reference.go new file mode 100644 index 0000000000..2f66cca87a --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/reference.go @@ -0,0 +1,433 @@ +// Package reference provides a general type to represent any way of referencing images within the registry. +// Its main purpose is to abstract tags and digests (content-addressable hash). +// +// Grammar +// +// reference := name [ ":" tag ] [ "@" digest ] +// name := [domain '/'] path-component ['/' path-component]* +// domain := domain-component ['.' domain-component]* [':' port-number] +// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ +// port-number := /[0-9]+/ +// path-component := alpha-numeric [separator alpha-numeric]* +// alpha-numeric := /[a-z0-9]+/ +// separator := /[_.]|__|[-]*/ +// +// tag := /[\w][\w.-]{0,127}/ +// +// digest := digest-algorithm ":" digest-hex +// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]* +// digest-algorithm-separator := /[+.-_]/ +// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ +// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value +// +// identifier := /[a-f0-9]{64}/ +// short-identifier := /[a-f0-9]{6,64}/ +package reference + +import ( + "errors" + "fmt" + "strings" + + "github.com/opencontainers/go-digest" +) + +const ( + // NameTotalLengthMax is the maximum total number of characters in a repository name. + NameTotalLengthMax = 255 +) + +var ( + // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. + ErrReferenceInvalidFormat = errors.New("invalid reference format") + + // ErrTagInvalidFormat represents an error while trying to parse a string as a tag. + ErrTagInvalidFormat = errors.New("invalid tag format") + + // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag. + ErrDigestInvalidFormat = errors.New("invalid digest format") + + // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters. + ErrNameContainsUppercase = errors.New("repository name must be lowercase") + + // ErrNameEmpty is returned for empty, invalid repository names. + ErrNameEmpty = errors.New("repository name must have at least one component") + + // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. + ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) + + // ErrNameNotCanonical is returned when a name is not canonical. 
+ ErrNameNotCanonical = errors.New("repository name must be canonical") +) + +// Reference is an opaque object reference identifier that may include +// modifiers such as a hostname, name, tag, and digest. +type Reference interface { + // String returns the full reference + String() string +} + +// Field provides a wrapper type for resolving correct reference types when +// working with encoding. +type Field struct { + reference Reference +} + +// AsField wraps a reference in a Field for encoding. +func AsField(reference Reference) Field { + return Field{reference} +} + +// Reference unwraps the reference type from the field to +// return the Reference object. This object should be +// of the appropriate type to further check for different +// reference types. +func (f Field) Reference() Reference { + return f.reference +} + +// MarshalText serializes the field to byte text which +// is the string of the reference. +func (f Field) MarshalText() (p []byte, err error) { + return []byte(f.reference.String()), nil +} + +// UnmarshalText parses text bytes by invoking the +// reference parser to ensure the appropriately +// typed reference object is wrapped by field. +func (f *Field) UnmarshalText(p []byte) error { + r, err := Parse(string(p)) + if err != nil { + return err + } + + f.reference = r + return nil +} + +// Named is an object with a full name +type Named interface { + Reference + Name() string +} + +// Tagged is an object which has a tag +type Tagged interface { + Reference + Tag() string +} + +// NamedTagged is an object including a name and tag. +type NamedTagged interface { + Named + Tag() string +} + +// Digested is an object which has a digest +// in which it can be referenced by +type Digested interface { + Reference + Digest() digest.Digest +} + +// Canonical reference is an object with a fully unique +// name including a name with domain and digest +type Canonical interface { + Named + Digest() digest.Digest +} + +// namedRepository is a reference to a repository with a name. +// A namedRepository has both domain and path components. +type namedRepository interface { + Named + Domain() string + Path() string +} + +// Domain returns the domain part of the Named reference +func Domain(named Named) string { + if r, ok := named.(namedRepository); ok { + return r.Domain() + } + domain, _ := splitDomain(named.Name()) + return domain +} + +// Path returns the name without the domain part of the Named reference +func Path(named Named) (name string) { + if r, ok := named.(namedRepository); ok { + return r.Path() + } + _, path := splitDomain(named.Name()) + return path +} + +func splitDomain(name string) (string, string) { + match := anchoredNameRegexp.FindStringSubmatch(name) + if len(match) != 3 { + return "", name + } + return match[1], match[2] +} + +// SplitHostname splits a named reference into a +// hostname and name string. If no valid hostname is +// found, the hostname is empty and the full value +// is returned as name +// DEPRECATED: Use Domain or Path +func SplitHostname(named Named) (string, string) { + if r, ok := named.(namedRepository); ok { + return r.Domain(), r.Path() + } + return splitDomain(named.Name()) +} + +// Parse parses s and returns a syntactically valid Reference. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: Parse will not handle short digests. 
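// ---- editor's note: usage sketch, not part of the vendored file ----
// Parse (below) returns the most specific type the input supports; callers
// recover it with type assertions. The reference string is illustrative:
//
//	ref, _ := reference.Parse("example.com/app:v1")
//	if tagged, ok := ref.(reference.NamedTagged); ok {
//		_ = tagged.Tag() // "v1"
//	}
//	if canonical, ok := ref.(reference.Canonical); ok {
//		_ = canonical.Digest() // only set for name@digest references
//	}
// ---- end editor's note ----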
+func Parse(s string) (Reference, error) { + matches := ReferenceRegexp.FindStringSubmatch(s) + if matches == nil { + if s == "" { + return nil, ErrNameEmpty + } + if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil { + return nil, ErrNameContainsUppercase + } + return nil, ErrReferenceInvalidFormat + } + + if len(matches[1]) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + var repo repository + + nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1]) + if nameMatch != nil && len(nameMatch) == 3 { + repo.domain = nameMatch[1] + repo.path = nameMatch[2] + } else { + repo.domain = "" + repo.path = matches[1] + } + + ref := reference{ + namedRepository: repo, + tag: matches[2], + } + if matches[3] != "" { + var err error + ref.digest, err = digest.Parse(matches[3]) + if err != nil { + return nil, err + } + } + + r := getBestReferenceType(ref) + if r == nil { + return nil, ErrNameEmpty + } + + return r, nil +} + +// ParseNamed parses s and returns a syntactically valid reference implementing +// the Named interface. The reference must have a name and be in the canonical +// form, otherwise an error is returned. +// If an error was encountered it is returned, along with a nil Reference. +// NOTE: ParseNamed will not handle short digests. +func ParseNamed(s string) (Named, error) { + named, err := ParseNormalizedNamed(s) + if err != nil { + return nil, err + } + if named.String() != s { + return nil, ErrNameNotCanonical + } + return named, nil +} + +// WithName returns a named object representing the given string. If the input +// is invalid ErrReferenceInvalidFormat will be returned. +func WithName(name string) (Named, error) { + if len(name) > NameTotalLengthMax { + return nil, ErrNameTooLong + } + + match := anchoredNameRegexp.FindStringSubmatch(name) + if match == nil || len(match) != 3 { + return nil, ErrReferenceInvalidFormat + } + return repository{ + domain: match[1], + path: match[2], + }, nil +} + +// WithTag combines the name from "name" and the tag from "tag" to form a +// reference incorporating both the name and the tag. +func WithTag(name Named, tag string) (NamedTagged, error) { + if !anchoredTagRegexp.MatchString(tag) { + return nil, ErrTagInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if canonical, ok := name.(Canonical); ok { + return reference{ + namedRepository: repo, + tag: tag, + digest: canonical.Digest(), + }, nil + } + return taggedReference{ + namedRepository: repo, + tag: tag, + }, nil +} + +// WithDigest combines the name from "name" and the digest from "digest" to form +// a reference incorporating both the name and the digest. +func WithDigest(name Named, digest digest.Digest) (Canonical, error) { + if !anchoredDigestRegexp.MatchString(digest.String()) { + return nil, ErrDigestInvalidFormat + } + var repo repository + if r, ok := name.(namedRepository); ok { + repo.domain = r.Domain() + repo.path = r.Path() + } else { + repo.path = name.Name() + } + if tagged, ok := name.(Tagged); ok { + return reference{ + namedRepository: repo, + tag: tagged.Tag(), + digest: digest, + }, nil + } + return canonicalReference{ + namedRepository: repo, + digest: digest, + }, nil +} + +// TrimNamed removes any tag or digest from the named reference. 
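// ---- editor's note: usage sketch, not part of the vendored file ----
// How the constructors above compose; dgst is an assumed, pre-computed
// digest.Digest value:
//
//	named, _ := reference.WithName("example.com/app")
//	tagged, _ := reference.WithTag(named, "v1")       // "example.com/app:v1"
//	canonical, _ := reference.WithDigest(named, dgst) // "example.com/app@sha256:..."
//	base := reference.TrimNamed(tagged)               // back to "example.com/app"
// ---- end editor's note ----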
+func TrimNamed(ref Named) Named { + domain, path := SplitHostname(ref) + return repository{ + domain: domain, + path: path, + } +} + +func getBestReferenceType(ref reference) Reference { + if ref.Name() == "" { + // Allow digest only references + if ref.digest != "" { + return digestReference(ref.digest) + } + return nil + } + if ref.tag == "" { + if ref.digest != "" { + return canonicalReference{ + namedRepository: ref.namedRepository, + digest: ref.digest, + } + } + return ref.namedRepository + } + if ref.digest == "" { + return taggedReference{ + namedRepository: ref.namedRepository, + tag: ref.tag, + } + } + + return ref +} + +type reference struct { + namedRepository + tag string + digest digest.Digest +} + +func (r reference) String() string { + return r.Name() + ":" + r.tag + "@" + r.digest.String() +} + +func (r reference) Tag() string { + return r.tag +} + +func (r reference) Digest() digest.Digest { + return r.digest +} + +type repository struct { + domain string + path string +} + +func (r repository) String() string { + return r.Name() +} + +func (r repository) Name() string { + if r.domain == "" { + return r.path + } + return r.domain + "/" + r.path +} + +func (r repository) Domain() string { + return r.domain +} + +func (r repository) Path() string { + return r.path +} + +type digestReference digest.Digest + +func (d digestReference) String() string { + return digest.Digest(d).String() +} + +func (d digestReference) Digest() digest.Digest { + return digest.Digest(d) +} + +type taggedReference struct { + namedRepository + tag string +} + +func (t taggedReference) String() string { + return t.Name() + ":" + t.tag +} + +func (t taggedReference) Tag() string { + return t.tag +} + +type canonicalReference struct { + namedRepository + digest digest.Digest +} + +func (c canonicalReference) String() string { + return c.Name() + "@" + c.digest.String() +} + +func (c canonicalReference) Digest() digest.Digest { + return c.digest +} diff --git a/vendor/github.com/docker/distribution/reference/regexp.go b/vendor/github.com/docker/distribution/reference/regexp.go new file mode 100644 index 0000000000..7860349320 --- /dev/null +++ b/vendor/github.com/docker/distribution/reference/regexp.go @@ -0,0 +1,143 @@ +package reference + +import "regexp" + +var ( + // alphaNumericRegexp defines the alpha numeric atom, typically a + // component of names. This only allows lower case characters and digits. + alphaNumericRegexp = match(`[a-z0-9]+`) + + // separatorRegexp defines the separators allowed to be embedded in name + // components. This allow one period, one or two underscore and multiple + // dashes. + separatorRegexp = match(`(?:[._]|__|[-]*)`) + + // nameComponentRegexp restricts registry path component names to start + // with at least one letter or number, with following parts able to be + // separated by one period, one or two underscore and multiple dashes. + nameComponentRegexp = expression( + alphaNumericRegexp, + optional(repeated(separatorRegexp, alphaNumericRegexp))) + + // domainComponentRegexp restricts the registry domain component of a + // repository name to start with a component as defined by DomainRegexp + // and followed by an optional port. + domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) + + // DomainRegexp defines the structure of potential domain components + // that may be part of image names. 
This is purposely a subset of what is + // allowed by DNS to ensure backwards compatibility with Docker image + // names. + DomainRegexp = expression( + domainComponentRegexp, + optional(repeated(literal(`.`), domainComponentRegexp)), + optional(literal(`:`), match(`[0-9]+`))) + + // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. + TagRegexp = match(`[\w][\w.-]{0,127}`) + + // anchoredTagRegexp matches valid tag names, anchored at the start and + // end of the matched string. + anchoredTagRegexp = anchored(TagRegexp) + + // DigestRegexp matches valid digests. + DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) + + // anchoredDigestRegexp matches valid digests, anchored at the start and + // end of the matched string. + anchoredDigestRegexp = anchored(DigestRegexp) + + // NameRegexp is the format for the name component of references. The + // regexp has capturing groups for the domain and name part omitting + // the separating forward slash from either. + NameRegexp = expression( + optional(DomainRegexp, literal(`/`)), + nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp))) + + // anchoredNameRegexp is used to parse a name value, capturing the + // domain and trailing components. + anchoredNameRegexp = anchored( + optional(capture(DomainRegexp), literal(`/`)), + capture(nameComponentRegexp, + optional(repeated(literal(`/`), nameComponentRegexp)))) + + // ReferenceRegexp is the full supported format of a reference. The regexp + // is anchored and has capturing groups for name, tag, and digest + // components. + ReferenceRegexp = anchored(capture(NameRegexp), + optional(literal(":"), capture(TagRegexp)), + optional(literal("@"), capture(DigestRegexp))) + + // IdentifierRegexp is the format for string identifier used as a + // content addressable identifier using sha256. These identifiers + // are like digests without the algorithm, since sha256 is used. + IdentifierRegexp = match(`([a-f0-9]{64})`) + + // ShortIdentifierRegexp is the format used to represent a prefix + // of an identifier. A prefix may be used to match a sha256 identifier + // within a list of trusted identifiers. + ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`) + + // anchoredIdentifierRegexp is used to check or match an + // identifier value, anchored at start and end of string. + anchoredIdentifierRegexp = anchored(IdentifierRegexp) + + // anchoredShortIdentifierRegexp is used to check if a value + // is a possible identifier prefix, anchored at start and end + // of string. + anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp) +) + +// match compiles the string to a regular expression. +var match = regexp.MustCompile + +// literal compiles s into a literal regular expression, escaping any regexp +// reserved characters. +func literal(s string) *regexp.Regexp { + re := match(regexp.QuoteMeta(s)) + + if _, complete := re.LiteralPrefix(); !complete { + panic("must be a literal") + } + + return re +} + +// expression defines a full expression, where each regular expression must +// follow the previous. +func expression(res ...*regexp.Regexp) *regexp.Regexp { + var s string + for _, re := range res { + s += re.String() + } + + return match(s) +} + +// optional wraps the expression in a non-capturing group and makes the +// production optional. 
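// ---- editor's note: usage sketch, not part of the vendored file ----
// ReferenceRegexp above captures name, tag, and digest in that order, so a
// reference can be split without the higher-level Parse helpers:
//
//	m := reference.ReferenceRegexp.FindStringSubmatch("registry.example.com:5000/team/app:v1")
//	// m[1] == "registry.example.com:5000/team/app"
//	// m[2] == "v1"
//	// m[3] == "" (no digest component present)
// ---- end editor's note ----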
+func optional(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `?`) +} + +// repeated wraps the regexp in a non-capturing group to get one or more +// matches. +func repeated(res ...*regexp.Regexp) *regexp.Regexp { + return match(group(expression(res...)).String() + `+`) +} + +// group wraps the regexp in a non-capturing group. +func group(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(?:` + expression(res...).String() + `)`) +} + +// capture wraps the expression in a capturing group. +func capture(res ...*regexp.Regexp) *regexp.Regexp { + return match(`(` + expression(res...).String() + `)`) +} + +// anchored anchors the regular expression by adding start and end delimiters. +func anchored(res ...*regexp.Regexp) *regexp.Regexp { + return match(`^` + expression(res...).String() + `$`) +} diff --git a/vendor/github.com/docker/distribution/registry.go b/vendor/github.com/docker/distribution/registry.go new file mode 100644 index 0000000000..6c32109894 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry.go @@ -0,0 +1,118 @@ +package distribution + +import ( + "context" + + "github.com/docker/distribution/reference" +) + +// Scope defines the set of items that match a namespace. +type Scope interface { + // Contains returns true if the name belongs to the namespace. + Contains(name string) bool +} + +type fullScope struct{} + +func (f fullScope) Contains(string) bool { + return true +} + +// GlobalScope represents the full namespace scope which contains +// all other scopes. +var GlobalScope = Scope(fullScope{}) + +// Namespace represents a collection of repositories, addressable by name. +// Generally, a namespace is backed by a set of one or more services, +// providing facilities such as registry access, trust, and indexing. +type Namespace interface { + // Scope describes the names that can be used with this Namespace. The + // global namespace will have a scope that matches all names. The scope + // effectively provides an identity for the namespace. + Scope() Scope + + // Repository should return a reference to the named repository. The + // registry may or may not have the repository but should always return a + // reference. + Repository(ctx context.Context, name reference.Named) (Repository, error) + + // Repositories fills 'repos' with a lexicographically sorted catalog of repositories + // up to the size of 'repos' and returns the value 'n' for the number of entries + // which were filled. 'last' contains an offset in the catalog, and 'err' will be + // set to io.EOF if there are no more entries to obtain. 
+ Repositories(ctx context.Context, repos []string, last string) (n int, err error) + + // Blobs returns a blob enumerator to access all blobs + Blobs() BlobEnumerator + + // BlobStatter returns a BlobStatter to control + BlobStatter() BlobStatter +} + +// RepositoryEnumerator describes an operation to enumerate repositories +type RepositoryEnumerator interface { + Enumerate(ctx context.Context, ingester func(string) error) error +} + +// RepositoryRemover removes given repository +type RepositoryRemover interface { + Remove(ctx context.Context, name reference.Named) error +} + +// ManifestServiceOption is a function argument for Manifest Service methods +type ManifestServiceOption interface { + Apply(ManifestService) error +} + +// WithTag allows a tag to be passed into Put +func WithTag(tag string) ManifestServiceOption { + return WithTagOption{tag} +} + +// WithTagOption holds a tag +type WithTagOption struct{ Tag string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithTagOption) Apply(m ManifestService) error { + // no implementation + return nil +} + +// WithManifestMediaTypes lists the media types the client wishes +// the server to provide. +func WithManifestMediaTypes(mediaTypes []string) ManifestServiceOption { + return WithManifestMediaTypesOption{mediaTypes} +} + +// WithManifestMediaTypesOption holds a list of accepted media types +type WithManifestMediaTypesOption struct{ MediaTypes []string } + +// Apply conforms to the ManifestServiceOption interface +func (o WithManifestMediaTypesOption) Apply(m ManifestService) error { + // no implementation + return nil +} + +// Repository is a named collection of manifests and layers. +type Repository interface { + // Named returns the name of the repository. + Named() reference.Named + + // Manifests returns a reference to this repository's manifest service. + // with the supplied options applied. + Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) + + // Blobs returns a reference to this repository's blob service. + Blobs(ctx context.Context) BlobStore + + // TODO(stevvooe): The above BlobStore return can probably be relaxed to + // be a BlobService for use with clients. This will allow such + // implementations to avoid implementing ServeBlob. + + // Tags returns a reference to this repositories tag service + Tags(ctx context.Context) TagService +} + +// TODO(stevvooe): Must add close methods to all these. May want to change the +// way instances are created to better reflect internal dependency +// relationships. diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go new file mode 100644 index 0000000000..6d9bb4b62a --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/errors.go @@ -0,0 +1,267 @@ +package errcode + +import ( + "encoding/json" + "fmt" + "strings" +) + +// ErrorCoder is the base interface for ErrorCode and Error allowing +// users of each to just call ErrorCode to get the real ID of each +type ErrorCoder interface { + ErrorCode() ErrorCode +} + +// ErrorCode represents the error type. The errors are serialized via strings +// and the integer format may change and should *never* be exported. 
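// ---- editor's note: usage sketch, not part of the vendored file ----
// How the ManifestServiceOption values above are threaded through a
// Repository; repo and dgst are assumed to come from a registry client:
//
//	ms, _ := repo.Manifests(ctx)
//	m, _ := ms.Get(ctx, dgst, distribution.WithManifestMediaTypes(
//		[]string{"application/vnd.docker.distribution.manifest.v2+json"}))
//	_, _ = ms.Put(ctx, m, distribution.WithTag("v1"))
// ---- end editor's note ----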
+type ErrorCode int
+
+var _ error = ErrorCode(0)
+
+// ErrorCode just returns itself
+func (ec ErrorCode) ErrorCode() ErrorCode {
+	return ec
+}
+
+// Error returns the ID/Value
+func (ec ErrorCode) Error() string {
+	// NOTE(stevvooe): Cannot use message here since it may have unpopulated args.
+	return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1))
+}
+
+// Descriptor returns the descriptor for the error code.
+func (ec ErrorCode) Descriptor() ErrorDescriptor {
+	d, ok := errorCodeToDescriptors[ec]
+
+	if !ok {
+		return ErrorCodeUnknown.Descriptor()
+	}
+
+	return d
+}
+
+// String returns the canonical identifier for this error code.
+func (ec ErrorCode) String() string {
+	return ec.Descriptor().Value
+}
+
+// Message returns the human-readable error message for this error code.
+func (ec ErrorCode) Message() string {
+	return ec.Descriptor().Message
+}
+
+// MarshalText encodes the receiver into UTF-8-encoded text and returns the
+// result.
+func (ec ErrorCode) MarshalText() (text []byte, err error) {
+	return []byte(ec.String()), nil
+}
+
+// UnmarshalText decodes the form generated by MarshalText.
+func (ec *ErrorCode) UnmarshalText(text []byte) error {
+	desc, ok := idToDescriptors[string(text)]
+
+	if !ok {
+		desc = ErrorCodeUnknown.Descriptor()
+	}
+
+	*ec = desc.Code
+
+	return nil
+}
+
+// WithMessage creates a new Error struct based on the passed-in info and
+// overrides the Message property.
+func (ec ErrorCode) WithMessage(message string) Error {
+	return Error{
+		Code:    ec,
+		Message: message,
+	}
+}
+
+// WithDetail creates a new Error struct based on the passed-in info and
+// sets the Detail property appropriately
+func (ec ErrorCode) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithDetail(detail)
+}
+
+// WithArgs creates a new Error struct and sets the Args slice
+func (ec ErrorCode) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    ec,
+		Message: ec.Message(),
+	}.WithArgs(args...)
+}
+
+// Error provides a wrapper around ErrorCode with extra Details provided.
+type Error struct {
+	Code    ErrorCode   `json:"code"`
+	Message string      `json:"message"`
+	Detail  interface{} `json:"detail,omitempty"`
+
+	// TODO(duglin): See if we need an "args" property so we can do the
+	// variable substitution right before showing the message to the user
+}
+
+var _ error = Error{}
+
+// ErrorCode returns the ID/Value of this Error
+func (e Error) ErrorCode() ErrorCode {
+	return e.Code
+}
+
+// Error returns a human readable representation of the error.
+func (e Error) Error() string {
+	return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message)
+}
+
+// WithDetail will return a new Error, based on the current one, but with
+// some Detail info added
+func (e Error) WithDetail(detail interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: e.Message,
+		Detail:  detail,
+	}
+}
+
+// WithArgs uses the passed-in list of interface{} as the substitution
+// variables in the Error's Message string, but returns a new Error
+func (e Error) WithArgs(args ...interface{}) Error {
+	return Error{
+		Code:    e.Code,
+		Message: fmt.Sprintf(e.Code.Message(), args...),
+		Detail:  e.Detail,
+	}
+}
+
+// ErrorDescriptor provides relevant information about a given error code.
+type ErrorDescriptor struct {
+	// Code is the error code that this descriptor describes.
+	Code ErrorCode
+
+	// Value provides a unique, string key, often capitalized with
+	// underscores, to identify the error code. This value is used as the
+	// keyed value when serializing api errors.
+	Value string
+
+	// Message is a short, human readable description of the error condition
+	// included in API responses.
+	Message string
+
+	// Description provides a complete account of the error's purpose, suitable
+	// for use in documentation.
+	Description string
+
+	// HTTPStatusCode provides the http status code that is associated with
+	// this error condition.
+	HTTPStatusCode int
+}
+
+// ParseErrorCode returns the value by the string error code.
+// `ErrorCodeUnknown` will be returned if the error is not known.
+func ParseErrorCode(value string) ErrorCode {
+	ed, ok := idToDescriptors[value]
+	if ok {
+		return ed.Code
+	}
+
+	return ErrorCodeUnknown
+}
+
+// Errors provides the envelope for multiple errors and a few sugar methods
+// for use within the application.
+type Errors []error
+
+var _ error = Errors{}
+
+func (errs Errors) Error() string {
+	switch len(errs) {
+	case 0:
+		return "<nil>"
+	case 1:
+		return errs[0].Error()
+	default:
+		msg := "errors:\n"
+		for _, err := range errs {
+			msg += err.Error() + "\n"
+		}
+		return msg
+	}
+}
+
+// Len returns the current number of errors.
+func (errs Errors) Len() int {
+	return len(errs)
+}
+
+// MarshalJSON converts a slice of error, ErrorCode or Error into a
+// slice of Error - then serializes
+func (errs Errors) MarshalJSON() ([]byte, error) {
+	var tmpErrs struct {
+		Errors []Error `json:"errors,omitempty"`
+	}
+
+	for _, daErr := range errs {
+		var err Error
+
+		switch daErr.(type) {
+		case ErrorCode:
+			err = daErr.(ErrorCode).WithDetail(nil)
+		case Error:
+			err = daErr.(Error)
+		default:
+			err = ErrorCodeUnknown.WithDetail(daErr)
+
+		}
+
+		// If the Error struct was set up and they forgot to set the
+		// Message field (meaning it's "") then grab it from the ErrCode
+		msg := err.Message
+		if msg == "" {
+			msg = err.Code.Message()
+		}
+
+		tmpErrs.Errors = append(tmpErrs.Errors, Error{
+			Code:    err.Code,
+			Message: msg,
+			Detail:  err.Detail,
+		})
+	}
+
+	return json.Marshal(tmpErrs)
+}
+
+// UnmarshalJSON deserializes []Error and then converts it into a slice of
+// Error or ErrorCode
+func (errs *Errors) UnmarshalJSON(data []byte) error {
+	var tmpErrs struct {
+		Errors []Error
+	}
+
+	if err := json.Unmarshal(data, &tmpErrs); err != nil {
+		return err
+	}
+
+	var newErrs Errors
+	for _, daErr := range tmpErrs.Errors {
+		// If Message is empty or exactly matches the Code's message string
+		// then just use the Code, no need for a full Error struct
+		if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) {
+			// Error's w/o details get converted to ErrorCode
+			newErrs = append(newErrs, daErr.Code)
+		} else {
+			// Error's w/ details are untouched
+			newErrs = append(newErrs, Error{
+				Code:    daErr.Code,
+				Message: daErr.Message,
+				Detail:  daErr.Detail,
+			})
+		}
+	}
+
+	*errs = newErrs
+	return nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
new file mode 100644
index 0000000000..d77e70473e
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/errcode/handler.go
@@ -0,0 +1,40 @@
+package errcode
+
+import (
+	"encoding/json"
+	"net/http"
+)
+
+// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err
+// and sets the content-type header to 'application/json'. It will handle
+// ErrorCoder and Errors, and if necessary will create an envelope.
+func ServeJSON(w http.ResponseWriter, err error) error { + w.Header().Set("Content-Type", "application/json; charset=utf-8") + var sc int + + switch errs := err.(type) { + case Errors: + if len(errs) < 1 { + break + } + + if err, ok := errs[0].(ErrorCoder); ok { + sc = err.ErrorCode().Descriptor().HTTPStatusCode + } + case ErrorCoder: + sc = errs.ErrorCode().Descriptor().HTTPStatusCode + err = Errors{err} // create an envelope. + default: + // We just have an unhandled error type, so just place in an envelope + // and move along. + err = Errors{err} + } + + if sc == 0 { + sc = http.StatusInternalServerError + } + + w.WriteHeader(sc) + + return json.NewEncoder(w).Encode(err) +} diff --git a/vendor/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/github.com/docker/distribution/registry/api/errcode/register.go new file mode 100644 index 0000000000..d1e8826c6d --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/errcode/register.go @@ -0,0 +1,138 @@ +package errcode + +import ( + "fmt" + "net/http" + "sort" + "sync" +) + +var ( + errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} + idToDescriptors = map[string]ErrorDescriptor{} + groupToDescriptors = map[string][]ErrorDescriptor{} +) + +var ( + // ErrorCodeUnknown is a generic error that can be used as a last + // resort if there is no situation-specific error message that can be used + ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ + Value: "UNKNOWN", + Message: "unknown error", + Description: `Generic error returned when the error does not have an + API classification.`, + HTTPStatusCode: http.StatusInternalServerError, + }) + + // ErrorCodeUnsupported is returned when an operation is not supported. + ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ + Value: "UNSUPPORTED", + Message: "The operation is unsupported.", + Description: `The operation was unsupported due to a missing + implementation or invalid set of parameters.`, + HTTPStatusCode: http.StatusMethodNotAllowed, + }) + + // ErrorCodeUnauthorized is returned if a request requires + // authentication. + ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ + Value: "UNAUTHORIZED", + Message: "authentication required", + Description: `The access controller was unable to authenticate + the client. Often this will be accompanied by a + Www-Authenticate HTTP response header indicating how to + authenticate.`, + HTTPStatusCode: http.StatusUnauthorized, + }) + + // ErrorCodeDenied is returned if a client does not have sufficient + // permission to perform an action. + ErrorCodeDenied = Register("errcode", ErrorDescriptor{ + Value: "DENIED", + Message: "requested access to the resource is denied", + Description: `The access controller denied access for the + operation on a resource.`, + HTTPStatusCode: http.StatusForbidden, + }) + + // ErrorCodeUnavailable provides a common error to report unavailability + // of a service or endpoint. + ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ + Value: "UNAVAILABLE", + Message: "service unavailable", + Description: "Returned when a service is not available", + HTTPStatusCode: http.StatusServiceUnavailable, + }) + + // ErrorCodeTooManyRequests is returned if a client attempts too many + // times to contact a service endpoint. 
+ ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ + Value: "TOOMANYREQUESTS", + Message: "too many requests", + Description: `Returned when a client attempts to contact a + service too many times`, + HTTPStatusCode: http.StatusTooManyRequests, + }) +) + +var nextCode = 1000 +var registerLock sync.Mutex + +// Register will make the passed-in error known to the environment and +// return a new ErrorCode +func Register(group string, descriptor ErrorDescriptor) ErrorCode { + registerLock.Lock() + defer registerLock.Unlock() + + descriptor.Code = ErrorCode(nextCode) + + if _, ok := idToDescriptors[descriptor.Value]; ok { + panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) + } + if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { + panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) + } + + groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) + errorCodeToDescriptors[descriptor.Code] = descriptor + idToDescriptors[descriptor.Value] = descriptor + + nextCode++ + return descriptor.Code +} + +type byValue []ErrorDescriptor + +func (a byValue) Len() int { return len(a) } +func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } + +// GetGroupNames returns the list of Error group names that are registered +func GetGroupNames() []string { + keys := []string{} + + for k := range groupToDescriptors { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// GetErrorCodeGroup returns the named group of error descriptors +func GetErrorCodeGroup(name string) []ErrorDescriptor { + desc := groupToDescriptors[name] + sort.Sort(byValue(desc)) + return desc +} + +// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are +// registered, irrespective of what group they're in +func GetErrorAllDescriptors() []ErrorDescriptor { + result := []ErrorDescriptor{} + + for _, group := range GetGroupNames() { + result = append(result, GetErrorCodeGroup(group)...) + } + sort.Sort(byValue(result)) + return result +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go new file mode 100644 index 0000000000..a9616c58ad --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/descriptors.go @@ -0,0 +1,1596 @@ +package v2 + +import ( + "net/http" + "regexp" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/api/errcode" + "github.com/opencontainers/go-digest" +) + +var ( + nameParameterDescriptor = ParameterDescriptor{ + Name: "name", + Type: "string", + Format: reference.NameRegexp.String(), + Required: true, + Description: `Name of the target repository.`, + } + + referenceParameterDescriptor = ParameterDescriptor{ + Name: "reference", + Type: "string", + Format: reference.TagRegexp.String(), + Required: true, + Description: `Tag or digest of the target manifest.`, + } + + uuidParameterDescriptor = ParameterDescriptor{ + Name: "uuid", + Type: "opaque", + Required: true, + Description: "A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", + } + + digestPathParameter = ParameterDescriptor{ + Name: "digest", + Type: "path", + Required: true, + Format: digest.DigestRegexp.String(), + Description: `Digest of desired blob.`, + } + + hostHeader = ParameterDescriptor{ + Name: "Host", + Type: "string", + Description: "Standard HTTP Host Header. Should be set to the registry host.", + Format: "", + Examples: []string{"registry-1.docker.io"}, + } + + authHeader = ParameterDescriptor{ + Name: "Authorization", + Type: "string", + Description: "An RFC7235 compliant authorization header.", + Format: " ", + Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, + } + + authChallengeHeader = ParameterDescriptor{ + Name: "WWW-Authenticate", + Type: "string", + Description: "An RFC7235 compliant authentication challenge header.", + Format: ` realm="", ..."`, + Examples: []string{ + `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, + }, + } + + contentLengthZeroHeader = ParameterDescriptor{ + Name: "Content-Length", + Description: "The `Content-Length` header must be zero and the body must be empty.", + Type: "integer", + Format: "0", + } + + dockerUploadUUIDHeader = ParameterDescriptor{ + Name: "Docker-Upload-UUID", + Description: "Identifies the docker upload uuid for the current request.", + Type: "uuid", + Format: "", + } + + digestHeader = ParameterDescriptor{ + Name: "Docker-Content-Digest", + Description: "Digest of the targeted content for the request.", + Type: "digest", + Format: "", + } + + linkHeader = ParameterDescriptor{ + Name: "Link", + Type: "link", + Description: "RFC5988 compliant rel='next' with URL to next result set, if available", + Format: `<?n=&last=>; rel="next"`, + } + + paginationParameters = []ParameterDescriptor{ + { + Name: "n", + Type: "integer", + Description: "Limit the number of entries in each response. 
It not present, all entries will be returned.", + Format: "", + Required: false, + }, + { + Name: "last", + Type: "string", + Description: "Result set will include values lexically after last.", + Format: "", + Required: false, + }, + } + + unauthorizedResponseDescriptor = ResponseDescriptor{ + Name: "Authentication Required", + StatusCode: http.StatusUnauthorized, + Description: "The client is not authenticated.", + Headers: []ParameterDescriptor{ + authChallengeHeader, + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnauthorized, + }, + } + + repositoryNotFoundResponseDescriptor = ResponseDescriptor{ + Name: "No Such Repository Error", + StatusCode: http.StatusNotFound, + Description: "The repository is not known to the registry.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + }, + } + + deniedResponseDescriptor = ResponseDescriptor{ + Name: "Access Denied", + StatusCode: http.StatusForbidden, + Description: "The client does not have required access to the repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeDenied, + }, + } + + tooManyRequestsDescriptor = ResponseDescriptor{ + Name: "Too Many Requests", + StatusCode: http.StatusTooManyRequests, + Description: "The client made too many requests within a time interval.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeTooManyRequests, + }, + } +) + +const ( + manifestBody = `{ + "name": , + "tag": , + "fsLayers": [ + { + "blobSum": "" + }, + ... + ] + ], + "history": , + "signature": +}` + + errorsBody = `{ + "errors:" [ + { + "code": , + "message": "", + "detail": ... + }, + ... + ] +}` +) + +// APIDescriptor exports descriptions of the layout of the v2 registry API. +var APIDescriptor = struct { + // RouteDescriptors provides a list of the routes available in the API. + RouteDescriptors []RouteDescriptor +}{ + RouteDescriptors: routeDescriptors, +} + +// RouteDescriptor describes a route specified by name. +type RouteDescriptor struct { + // Name is the name of the route, as specified in RouteNameXXX exports. + // These names a should be considered a unique reference for a route. If + // the route is registered with gorilla, this is the name that will be + // used. + Name string + + // Path is a gorilla/mux-compatible regexp that can be used to match the + // route. For any incoming method and path, only one route descriptor + // should match. + Path string + + // Entity should be a short, human-readalbe description of the object + // targeted by the endpoint. 
+ Entity string + + // Description should provide an accurate overview of the functionality + // provided by the route. + Description string + + // Methods should describe the various HTTP methods that may be used on + // this route, including request and response formats. + Methods []MethodDescriptor +} + +// MethodDescriptor provides a description of the requests that may be +// conducted with the target method. +type MethodDescriptor struct { + + // Method is an HTTP method, such as GET, PUT or POST. + Method string + + // Description should provide an overview of the functionality provided by + // the covered method, suitable for use in documentation. Use of markdown + // here is encouraged. + Description string + + // Requests is a slice of request descriptors enumerating how this + // endpoint may be used. + Requests []RequestDescriptor +} + +// RequestDescriptor covers a particular set of headers and parameters that +// can be carried out with the parent method. Its most helpful to have one +// RequestDescriptor per API use case. +type RequestDescriptor struct { + // Name provides a short identifier for the request, usable as a title or + // to provide quick context for the particular request. + Name string + + // Description should cover the requests purpose, covering any details for + // this particular use case. + Description string + + // Headers describes headers that must be used with the HTTP request. + Headers []ParameterDescriptor + + // PathParameters enumerate the parameterized path components for the + // given request, as defined in the route's regular expression. + PathParameters []ParameterDescriptor + + // QueryParameters provides a list of query parameters for the given + // request. + QueryParameters []ParameterDescriptor + + // Body describes the format of the request body. + Body BodyDescriptor + + // Successes enumerates the possible responses that are considered to be + // the result of a successful request. + Successes []ResponseDescriptor + + // Failures covers the possible failures from this particular request. + Failures []ResponseDescriptor +} + +// ResponseDescriptor describes the components of an API response. +type ResponseDescriptor struct { + // Name provides a short identifier for the response, usable as a title or + // to provide quick context for the particular response. + Name string + + // Description should provide a brief overview of the role of the + // response. + Description string + + // StatusCode specifies the status received by this particular response. + StatusCode int + + // Headers covers any headers that may be returned from the response. + Headers []ParameterDescriptor + + // Fields describes any fields that may be present in the response. + Fields []ParameterDescriptor + + // ErrorCodes enumerates the error codes that may be returned along with + // the response. + ErrorCodes []errcode.ErrorCode + + // Body describes the body of the response, if any. + Body BodyDescriptor +} + +// BodyDescriptor describes a request body and its expected content type. For +// the most part, it should be example json or some placeholder for body +// data in documentation. +type BodyDescriptor struct { + ContentType string + Format string +} + +// ParameterDescriptor describes the format of a request parameter, which may +// be a header, path parameter or query parameter. +type ParameterDescriptor struct { + // Name is the name of the parameter, either of the path component or + // query parameter. 
+ Name string + + // Type specifies the type of the parameter, such as string, integer, etc. + Type string + + // Description provides a human-readable description of the parameter. + Description string + + // Required means the field is required when set. + Required bool + + // Format is a specifying the string format accepted by this parameter. + Format string + + // Regexp is a compiled regular expression that can be used to validate + // the contents of the parameter. + Regexp *regexp.Regexp + + // Examples provides multiple examples for the values that might be valid + // for this parameter. + Examples []string +} + +var routeDescriptors = []RouteDescriptor{ + { + Name: RouteNameBase, + Path: "/v2/", + Entity: "Base", + Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Check that the endpoint implements Docker Registry API V2.", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Successes: []ResponseDescriptor{ + { + Description: "The API implements V2 protocol and is accessible.", + StatusCode: http.StatusOK, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "The registry does not implement the V2 API.", + StatusCode: http.StatusNotFound, + }, + unauthorizedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameTags, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", + Entity: "Tags", + Description: "Retrieve information about tags.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Fetch the tags under the repository identified by `name`.", + Requests: []RequestDescriptor{ + { + Name: "Tags", + Description: "Return all tags for the repository", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... + ] +}`, + }, + }, + }, + Failures: []ResponseDescriptor{ + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Tags Paginated", + Description: "Return a portion of the tags for the specified repository.", + PathParameters: []ParameterDescriptor{nameParameterDescriptor}, + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Description: "A list of tags for the named repository.", + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "name": , + "tags": [ + , + ... 
+    ]
+}`,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+		},
+	},
+	{
+		Name:        RouteNameManifest,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}",
+		Entity:      "Manifest",
+		Description: "Create, update, delete and retrieve manifests.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							referenceParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.",
+								StatusCode:  http.StatusOK,
+								Headers: []ParameterDescriptor{
+									digestHeader,
+								},
+								Body: BodyDescriptor{
+									ContentType: "<media type of manifest>",
+									Format:      manifestBody,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "The name or reference was invalid.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+			{
+				Method:      "PUT",
+				Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							referenceParameterDescriptor,
+						},
+						Body: BodyDescriptor{
+							ContentType: "<media type of manifest>",
+							Format:      manifestBody,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Description: "The canonical location url of the uploaded manifest.",
+										Format:      "<url>",
+									},
+									contentLengthZeroHeader,
+									digestHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:        "Invalid Manifest",
+								Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.",
+								StatusCode:  http.StatusBadRequest,
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+									ErrorCodeManifestInvalid,
+									ErrorCodeManifestUnverified,
+									ErrorCodeBlobUnknown,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+							{
+								Name:        "Missing Layer(s)",
+								Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeBlobUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format: `{
+    "errors": [{
+            "code": "BLOB_UNKNOWN",
+            "message": "blob unknown to registry",
+            "detail": {
+                "digest": "<digest>"
+            }
+        },
+        ...
+    ]
+}`,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+						},
+					},
+				},
+			},
+			{
+				Method:      "DELETE",
+				Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.",
+				Requests: []RequestDescriptor{
+					{
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							referenceParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								StatusCode: http.StatusAccepted,
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:        "Invalid Name or Reference",
+								Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameInvalid,
+									ErrorCodeTagInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+							{
+								Name:        "Unknown Manifest",
+								Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeNameUnknown,
+									ErrorCodeManifestUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	},
+
+	{
+		Name:        RouteNameBlob,
+		Path:        "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}",
+		Entity:      "Blob",
+		Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.",
+		Methods: []MethodDescriptor{
+			{
+				Method:      "GET",
+				Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.",
+				Requests: []RequestDescriptor{
+					{
+						Name: "Fetch Blob",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							digestPathParameter,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob content.", + Format: "", + }, + digestHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + { + Description: "The blob identified by `digest` is available at the provided location.", + StatusCode: http.StatusTemporaryRedirect, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Description: "The location where the layer should be accessible.", + Format: "", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Fetch Blob Part", + Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Range", + Type: "string", + Description: "HTTP Range header specifying blob chunk.", + Format: "bytes=-", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", + StatusCode: http.StatusPartialContent, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "The length of the requested blob chunk.", + Format: "", + }, + { + Name: "Content-Range", + Type: "byte range", + Description: "Content range of blob chunk.", + Format: "bytes -/", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeDigestInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", + StatusCode: http.StatusRequestedRangeNotSatisfiable, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Delete the blob identified by `name` and `digest`", + Requests: []RequestDescriptor{ + { + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + digestPathParameter, + }, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusAccepted, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "0", + Format: "0", + }, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", + StatusCode: http.StatusNotFound, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameUnknown, + ErrorCodeBlobUnknown, + }, + }, + { + Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", + StatusCode: http.StatusMethodNotAllowed, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + + // TODO(stevvooe): We may want to add a PUT request here to + // kickoff an upload of a blob, integrated with the blob upload + // API. + }, + }, + + { + Name: RouteNameBlobUpload, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", + Entity: "Initiate Blob Upload", + Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", + Methods: []MethodDescriptor{ + { + Method: "POST", + Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", + Requests: []RequestDescriptor{ + { + Name: "Initiate Monolithic Blob Upload", + Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + { + Name: "Content-Length", + Type: "integer", + Format: "", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`,
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "<binary data>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The blob has been created in the registry and is available at the provided location.",
+								StatusCode:  http.StatusCreated,
+								Headers: []ParameterDescriptor{
+									{
+										Name:   "Location",
+										Type:   "url",
+										Format: "<blob location>",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Name:       "Invalid Name or Digest",
+								StatusCode: http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+								},
+							},
+							{
+								Name:        "Not allowed",
+								Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason",
+								StatusCode:  http.StatusMethodNotAllowed,
+								ErrorCodes: []errcode.ErrorCode{
+									errcode.ErrorCodeUnsupported,
+								},
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+					{
+						Name:        "Initiate Resumable Blob Upload",
+						Description: "Initiate a resumable blob upload with an empty request body.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							contentLengthZeroHeader,
+						},
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.",
+								StatusCode:  http.StatusAccepted,
+								Headers: []ParameterDescriptor{
+									contentLengthZeroHeader,
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Format:      "0-0",
+										Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", + }, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Mount Blob", + Description: "Mount a blob identified by the `mount` parameter from another repository.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "mount", + Type: "query", + Format: "", + Regexp: digest.DigestRegexp, + Description: `Digest of blob to mount from the source repository.`, + }, + { + Name: "from", + Type: "query", + Format: "", + Regexp: reference.NameRegexp, + Description: `Name of the source repository.`, + }, + }, + Successes: []ResponseDescriptor{ + { + Description: "The blob has been mounted in the repository and is available at the provided location.", + StatusCode: http.StatusCreated, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Name: "Invalid Name or Digest", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + }, + }, + { + Name: "Not allowed", + Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", + StatusCode: http.StatusMethodNotAllowed, + ErrorCodes: []errcode.ErrorCode{ + errcode.ErrorCodeUnsupported, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + + { + Name: RouteNameBlobUploadChunk, + Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", + Entity: "Blob Upload", + Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", + Requests: []RequestDescriptor{ + { + Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Progress", + Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "PATCH", + Description: "Upload a chunk of data for the specified upload.", + Requests: []RequestDescriptor{ + { + Name: "Stream upload", + Description: "Upload a stream of data to upload without completing the upload.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Data Accepted", + Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "/v2//blobs/uploads/", + Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", + }, + { + Name: "Range", + Type: "header", + Format: "0-", + Description: "Range indicating the current progress of the upload.", + }, + contentLengthZeroHeader, + dockerUploadUUIDHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + { + Name: "Chunked upload", + Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.",
+						PathParameters: []ParameterDescriptor{
+							nameParameterDescriptor,
+							uuidParameterDescriptor,
+						},
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Content-Range",
+								Type:        "header",
+								Format:      "<start of range>-<end of range, inclusive>",
+								Required:    true,
+								Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.",
+							},
+							{
+								Name:        "Content-Length",
+								Type:        "integer",
+								Format:      "<length of chunk>",
+								Description: "Length of the chunk being uploaded, corresponding to the length of the request body.",
+							},
+						},
+						Body: BodyDescriptor{
+							ContentType: "application/octet-stream",
+							Format:      "<binary chunk>",
+						},
+						Successes: []ResponseDescriptor{
+							{
+								Name:        "Chunk Accepted",
+								Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.",
+								StatusCode:  http.StatusNoContent,
+								Headers: []ParameterDescriptor{
+									{
+										Name:        "Location",
+										Type:        "url",
+										Format:      "/v2/<name>/blobs/uploads/<uuid>",
+										Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.",
+									},
+									{
+										Name:        "Range",
+										Type:        "header",
+										Format:      "0-<offset>",
+										Description: "Range indicating the current progress of the upload.",
+									},
+									contentLengthZeroHeader,
+									dockerUploadUUIDHeader,
+								},
+							},
+						},
+						Failures: []ResponseDescriptor{
+							{
+								Description: "There was an error processing the upload and it must be restarted.",
+								StatusCode:  http.StatusBadRequest,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeDigestInvalid,
+									ErrorCodeNameInvalid,
+									ErrorCodeBlobUploadInvalid,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The upload is unknown to the registry. The upload must be restarted.",
+								StatusCode:  http.StatusNotFound,
+								ErrorCodes: []errcode.ErrorCode{
+									ErrorCodeBlobUploadUnknown,
+								},
+								Body: BodyDescriptor{
+									ContentType: "application/json; charset=utf-8",
+									Format:      errorsBody,
+								},
+							},
+							{
+								Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.",
+								StatusCode:  http.StatusRequestedRangeNotSatisfiable,
+							},
+							unauthorizedResponseDescriptor,
+							repositoryNotFoundResponseDescriptor,
+							deniedResponseDescriptor,
+							tooManyRequestsDescriptor,
+						},
+					},
+				},
+			},
+			{
+				Method:      "PUT",
+				Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.",
+				Requests: []RequestDescriptor{
+					{
+						Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.",
+						Headers: []ParameterDescriptor{
+							hostHeader,
+							authHeader,
+							{
+								Name:        "Content-Length",
+								Type:        "integer",
+								Format:      "<length of data>",
+								Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", + }, + }, + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + QueryParameters: []ParameterDescriptor{ + { + Name: "digest", + Type: "string", + Format: "", + Regexp: digest.DigestRegexp, + Required: true, + Description: `Digest of uploaded blob.`, + }, + }, + Body: BodyDescriptor{ + ContentType: "application/octet-stream", + Format: "", + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Complete", + Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + { + Name: "Location", + Type: "url", + Format: "", + Description: "The canonical location of the blob for retrieval", + }, + { + Name: "Content-Range", + Type: "header", + Format: "-", + Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", + }, + contentLengthZeroHeader, + digestHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "There was an error processing the upload and it must be restarted.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeDigestInvalid, + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + errcode.ErrorCodeUnsupported, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. The upload must be restarted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + { + Method: "DELETE", + Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", + Requests: []RequestDescriptor{ + { + Description: "Cancel the upload specified by `uuid`.", + PathParameters: []ParameterDescriptor{ + nameParameterDescriptor, + uuidParameterDescriptor, + }, + Headers: []ParameterDescriptor{ + hostHeader, + authHeader, + contentLengthZeroHeader, + }, + Successes: []ResponseDescriptor{ + { + Name: "Upload Deleted", + Description: "The upload has been successfully deleted.", + StatusCode: http.StatusNoContent, + Headers: []ParameterDescriptor{ + contentLengthZeroHeader, + }, + }, + }, + Failures: []ResponseDescriptor{ + { + Description: "An error was encountered processing the delete. The client may ignore this error.", + StatusCode: http.StatusBadRequest, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeNameInvalid, + ErrorCodeBlobUploadInvalid, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + { + Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", + StatusCode: http.StatusNotFound, + ErrorCodes: []errcode.ErrorCode{ + ErrorCodeBlobUploadUnknown, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: errorsBody, + }, + }, + unauthorizedResponseDescriptor, + repositoryNotFoundResponseDescriptor, + deniedResponseDescriptor, + tooManyRequestsDescriptor, + }, + }, + }, + }, + }, + }, + { + Name: RouteNameCatalog, + Path: "/v2/_catalog", + Entity: "Catalog", + Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", + Methods: []MethodDescriptor{ + { + Method: "GET", + Description: "Retrieve a sorted, json list of repositories available in the registry.", + Requests: []RequestDescriptor{ + { + Name: "Catalog Fetch", + Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", + Successes: []ResponseDescriptor{ + { + Description: "Returns the unabridged list of repositories as a json response.", + StatusCode: http.StatusOK, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + }, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] +}`, + }, + }, + }, + }, + { + Name: "Catalog Fetch Paginated", + Description: "Return the specified portion of repositories.", + QueryParameters: paginationParameters, + Successes: []ResponseDescriptor{ + { + StatusCode: http.StatusOK, + Body: BodyDescriptor{ + ContentType: "application/json; charset=utf-8", + Format: `{ + "repositories": [ + , + ... + ] + "next": "?last=&n=" +}`, + }, + Headers: []ParameterDescriptor{ + { + Name: "Content-Length", + Type: "integer", + Description: "Length of the JSON response body.", + Format: "", + }, + linkHeader, + }, + }, + }, + }, + }, + }, + }, + }, +} + +var routeDescriptorsMap map[string]RouteDescriptor + +func init() { + routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) + + for _, descriptor := range routeDescriptors { + routeDescriptorsMap[descriptor.Name] = descriptor + } +} diff --git a/vendor/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/github.com/docker/distribution/registry/api/v2/doc.go new file mode 100644 index 0000000000..cde0119594 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/api/v2/doc.go @@ -0,0 +1,9 @@ +// Package v2 describes routes, urls and the error codes used in the Docker +// Registry JSON HTTP API V2. In addition to declarations, descriptors are +// provided for routes and error codes that can be used for implementation and +// automatically generating documentation. +// +// Definitions here are considered to be locked down for the V2 registry api. +// Any changes must be considered carefully and should not proceed without a +// change proposal in docker core. 
+package v2
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
new file mode 100644
index 0000000000..97d6923aa0
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/errors.go
@@ -0,0 +1,136 @@
+package v2
+
+import (
+	"net/http"
+
+	"github.com/docker/distribution/registry/api/errcode"
+)
+
+const errGroup = "registry.api.v2"
+
+var (
+	// ErrorCodeDigestInvalid is returned when uploading a blob if the
+	// provided digest does not match the blob contents.
+	ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "DIGEST_INVALID",
+		Message: "provided digest did not match uploaded content",
+		Description: `When a blob is uploaded, the registry will check that
+		the content matches the digest provided by the client. The error may
+		include a detail structure with the key "digest", including the
+		invalid digest string. This error may also be returned when a manifest
+		includes an invalid layer digest.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
+	// size does not match the content length.
+	ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "SIZE_INVALID",
+		Message: "provided length did not match content length",
+		Description: `When a layer is uploaded, the provided size will be
+		checked against the uploaded content. If they do not match, this error
+		will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameInvalid is returned when the name in the manifest does not
+	// match the provided name.
+	ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_INVALID",
+		Message: "invalid repository name",
+		Description: `Invalid repository name encountered either during
+		manifest validation or any API operation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
+	// match the provided tag.
+	ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "TAG_INVALID",
+		Message: "manifest tag did not match URI",
+		Description: `During a manifest upload, if the tag in the manifest
+		does not match the uri tag, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeNameUnknown is returned when the repository name is not known.
+	ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "NAME_UNKNOWN",
+		Message: "repository name not known to registry",
+		Description: `This is returned if the name used during an operation is
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestUnknown is returned when the image manifest is unknown.
+	ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNKNOWN",
+		Message: "manifest unknown",
+		Description: `This error is returned when the manifest, identified by
+		name and tag, is unknown to the repository.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeManifestInvalid is returned when an image manifest is invalid,
+	// typically during a PUT operation. This error encompasses all errors
+	// encountered during manifest validation that aren't signature errors.
+	ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_INVALID",
+		Message: "manifest invalid",
+		Description: `During upload, manifests undergo several checks ensuring
+		validity. If those checks fail, this error may be returned, unless a
+		more specific error is included. The detail will contain information
+		about the failed validation.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeManifestUnverified is returned when the manifest fails
+	// signature verification.
+	ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_UNVERIFIED",
+		Message: "manifest failed signature verification",
+		Description: `During manifest upload, if the manifest fails signature
+		verification, this error will be returned.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
+	// unknown to the registry.
+	ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "MANIFEST_BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a manifest blob is
+		unknown to the registry.`,
+		HTTPStatusCode: http.StatusBadRequest,
+	})
+
+	// ErrorCodeBlobUnknown is returned when a blob is unknown to the
+	// registry. This can happen when the manifest references a nonexistent
+	// layer or the result is not found by a blob fetch.
+	ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UNKNOWN",
+		Message: "blob unknown to registry",
+		Description: `This error may be returned when a blob is unknown to the
+		registry in a specified repository. This can be returned with a
+		standard get or if a manifest references an unknown layer during
+		upload.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
+	ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UPLOAD_UNKNOWN",
+		Message: "blob upload unknown to registry",
+		Description: `If a blob upload has been cancelled or was never
+		started, this error code may be returned.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+
+	// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
+	ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
+		Value:   "BLOB_UPLOAD_INVALID",
+		Message: "blob upload invalid",
+		Description: `The blob upload encountered an error and can no
+		longer proceed.`,
+		HTTPStatusCode: http.StatusNotFound,
+	})
+)
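
For illustration, the error codes registered above produce exactly the `errorsBody` JSON shape used throughout descriptors.go. A minimal sketch of rendering that shape, assuming the upstream errcode package's exported WithDetail helper and Errors type behave as in docker/distribution (neither is part of this hunk):

	package main

	import (
		"encoding/json"
		"fmt"

		"github.com/docker/distribution/registry/api/errcode"
		v2 "github.com/docker/distribution/registry/api/v2"
	)

	func main() {
		// Attach detail to a registered error code, then marshal the slice
		// the way a registry renders an error response body.
		errs := errcode.Errors{
			v2.ErrorCodeDigestInvalid.WithDetail(map[string]string{"digest": "sha256:deadbeef"}),
		}
		b, err := json.Marshal(errs)
		if err != nil {
			panic(err)
		}
		fmt.Println(string(b))
		// Expected shape: {"errors":[{"code":"DIGEST_INVALID","message":"provided digest did not match uploaded content","detail":{"digest":"sha256:deadbeef"}}]}
	}
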
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
new file mode 100644
index 0000000000..9bc41a3a64
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/headerparser.go
@@ -0,0 +1,161 @@
+package v2
+
+import (
+	"fmt"
+	"regexp"
+	"strings"
+	"unicode"
+)
+
+var (
+	// according to rfc7230
+	reToken            = regexp.MustCompile(`^[^"(),/:;<=>?@[\]{}[:space:][:cntrl:]]+`)
+	reQuotedValue      = regexp.MustCompile(`^[^\\"]+`)
+	reEscapedCharacter = regexp.MustCompile(`^[[:blank:][:graph:]]`)
+)
+
+// parseForwardedHeader is a benevolent parser of the Forwarded header defined in rfc7239. The header contains
+// a comma-separated list of forwarding key-value pairs. Each list element is set by a single proxy. The
+// function parses only the first element of the list, which is set by the very first proxy. It returns a map
+// of corresponding key-value pairs and an unparsed slice of the input string.
+//
+// Examples of Forwarded header values:
+//
+// 1. Forwarded: For=192.0.2.43; Proto=https,For="[2001:db8:cafe::17]",For=unknown
+// 2. Forwarded: for="192.0.2.43:443"; host="registry.example.org", for="10.10.05.40:80"
+//
+// The first will be parsed into {"for": "192.0.2.43", "proto": "https"} while the second into
+// {"for": "192.0.2.43:443", "host": "registry.example.org"}.
+func parseForwardedHeader(forwarded string) (map[string]string, string, error) {
+	// Following are the states of the forwarded header parser. Any state could transition to a failure.
+	const (
+		// terminating state; can transition to Parameter
+		stateElement = iota
+		// terminating state; can transition to KeyValueDelimiter
+		stateParameter
+		// can transition to Value
+		stateKeyValueDelimiter
+		// can transition to one of { QuotedValue, PairEnd }
+		stateValue
+		// can transition to one of { EscapedCharacter, PairEnd }
+		stateQuotedValue
+		// can transition to one of { QuotedValue }
+		stateEscapedCharacter
+		// terminating state; can transition to one of { Parameter, Element }
+		statePairEnd
+	)
+
+	var (
+		parameter string
+		value     string
+		parse     = forwarded[:]
+		res       = map[string]string{}
+		state     = stateElement
+	)
+
+Loop:
+	for {
+		// skip spaces unless in quoted value
+		if state != stateQuotedValue && state != stateEscapedCharacter {
+			parse = strings.TrimLeftFunc(parse, unicode.IsSpace)
+		}
+
+		if len(parse) == 0 {
+			if state != stateElement && state != statePairEnd && state != stateParameter {
+				return nil, parse, fmt.Errorf("unexpected end of input")
+			}
+			// terminating
+			break
+		}
+
+		switch state {
+		// terminate at list element delimiter
+		case stateElement:
+			if parse[0] == ',' {
+				parse = parse[1:]
+				break Loop
+			}
+			state = stateParameter
+
+		// parse parameter (the key of key-value pair)
+		case stateParameter:
+			match := reToken.FindString(parse)
+			if len(match) == 0 {
+				return nil, parse, fmt.Errorf("failed to parse token at position %d", len(forwarded)-len(parse))
+			}
+			parameter = strings.ToLower(match)
+			parse = parse[len(match):]
+			state = stateKeyValueDelimiter
+
+		// parse '='
+		case stateKeyValueDelimiter:
+			if parse[0] != '=' {
+				return nil, parse, fmt.Errorf("expected '=', not '%c' at position %d", parse[0], len(forwarded)-len(parse))
+			}
+			parse = parse[1:]
+			state = stateValue
+
+		// parse value or quoted value
+		case stateValue:
+			if parse[0] == '"' {
+				parse = parse[1:]
+				state = stateQuotedValue
+			} else {
+				value = reToken.FindString(parse)
+				if len(value) == 0 {
+					return nil, parse, fmt.Errorf("failed to parse value at position %d", len(forwarded)-len(parse))
+				}
+				if _, exists := res[parameter]; exists {
+					return nil, parse, fmt.Errorf("duplicate parameter %q at position %d", parameter, len(forwarded)-len(parse))
+				}
+				res[parameter] = value
+				parse = parse[len(value):]
+				value = ""
+				state = statePairEnd
+			}
+
+		// parse a part of quoted value until the first backslash
+		case stateQuotedValue:
+			match := reQuotedValue.FindString(parse)
+			value += match
+			parse = parse[len(match):]
+			switch {
+			case len(parse) == 0:
+				return nil, parse, fmt.Errorf("unterminated quoted string")
+			case parse[0] == '"':
+				res[parameter] = value
+				value = ""
+				parse = parse[1:]
+				state = statePairEnd
+			case parse[0] == '\\':
+				parse = parse[1:]
+				state = stateEscapedCharacter
+			}
+
+		// parse escaped character in a quoted string, ignore the backslash
+		// transition back to QuotedValue state
+		case stateEscapedCharacter:
+			c := reEscapedCharacter.FindString(parse)
+			if len(c) == 0 {
+				return nil, parse, fmt.Errorf("invalid escape sequence at position %d", len(forwarded)-len(parse)-1)
+			}
+			value += c
+			parse = parse[1:]
+			state = stateQuotedValue
+
+		// expect either a new key-value pair, new list or end of input
+		case statePairEnd:
+			switch parse[0] {
+			case ';':
+				parse = parse[1:]
+				state = stateParameter
+			case ',':
+				state = stateElement
+			default:
+				return nil, parse, fmt.Errorf("expected ',' or ';', not %c at position %d", parse[0], len(forwarded)-len(parse))
+			}
+		}
+	}
+
+	return res, parse, nil
+}
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
new file mode 100644
index 0000000000..9612ac2e5a
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/routes.go
@@ -0,0 +1,40 @@
+package v2
+
+import "github.com/gorilla/mux"
+
+// The following are definitions of the name under which all V2 routes are
+// registered. These symbols can be used to look up a route based on the name.
+const (
+	RouteNameBase            = "base"
+	RouteNameManifest        = "manifest"
+	RouteNameTags            = "tags"
+	RouteNameBlob            = "blob"
+	RouteNameBlobUpload      = "blob-upload"
+	RouteNameBlobUploadChunk = "blob-upload-chunk"
+	RouteNameCatalog         = "catalog"
+)
+
+// Router builds a gorilla router with named routes for the various API
+// methods. This can be used directly by both server implementations and
+// clients.
+func Router() *mux.Router {
+	return RouterWithPrefix("")
+}
+
+// RouterWithPrefix builds a gorilla router with a configured prefix
+// on all routes.
+func RouterWithPrefix(prefix string) *mux.Router {
+	rootRouter := mux.NewRouter()
+	router := rootRouter
+	if prefix != "" {
+		router = router.PathPrefix(prefix).Subrouter()
+	}
+
+	router.StrictSlash(true)
+
+	for _, descriptor := range routeDescriptors {
+		router.Path(descriptor.Path).Name(descriptor.Name)
+	}
+
+	return rootRouter
+}
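
For illustration, the named routes registered above can be expanded directly with gorilla/mux; a minimal sketch of consuming this vendored package (the URLBuilder in urls.go below wraps the same lookup):

	package main

	import (
		"fmt"

		v2 "github.com/docker/distribution/registry/api/v2"
	)

	func main() {
		// Look up the named manifest route and fill in its path variables.
		// The name must satisfy reference.NameRegexp; the reference is a
		// tag or digest matching the route's regular expression.
		route := v2.Router().GetRoute(v2.RouteNameManifest)
		u, err := route.URL("name", "library/ubuntu", "reference", "latest")
		if err != nil {
			panic(err)
		}
		fmt.Println(u.Path) // /v2/library/ubuntu/manifests/latest
	}
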
diff --git a/vendor/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
new file mode 100644
index 0000000000..1337bdb127
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/api/v2/urls.go
@@ -0,0 +1,266 @@
+package v2
+
+import (
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+
+	"github.com/docker/distribution/reference"
+	"github.com/gorilla/mux"
+)
+
+// URLBuilder creates registry API urls from a single base endpoint. It can be
+// used to create urls for use in a registry client or server.
+//
+// All urls will be created from the given base, including the api version.
+// For example, if a root of "/foo/" is provided, urls generated will fall
+// under "/foo/v2/...". Most applications will only provide a schema, host and
+// port, such as "https://localhost:5000/".
+type URLBuilder struct {
+	root     *url.URL // url root (ie http://localhost/)
+	router   *mux.Router
+	relative bool
+}
+
+// NewURLBuilder creates a URLBuilder with provided root url object.
+func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
+	return &URLBuilder{
+		root:     root,
+		router:   Router(),
+		relative: relative,
+	}
+}
+
+// NewURLBuilderFromString works identically to NewURLBuilder except it takes
+// a string argument for the root, returning an error if it is not a valid
+// url.
+func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
+	u, err := url.Parse(root)
+	if err != nil {
+		return nil, err
+	}
+
+	return NewURLBuilder(u, relative), nil
+}
+
+// NewURLBuilderFromRequest uses information from an *http.Request to
+// construct the root url.
+func NewURLBuilderFromRequest(r *http.Request, relative bool) *URLBuilder {
+	var (
+		scheme = "http"
+		host   = r.Host
+	)
+
+	if r.TLS != nil {
+		scheme = "https"
+	} else if len(r.URL.Scheme) > 0 {
+		scheme = r.URL.Scheme
+	}
+
+	// Handle forwarded headers
+	// Prefer "Forwarded" header as defined by rfc7239 if given
+	// see https://tools.ietf.org/html/rfc7239
+	if forwarded := r.Header.Get("Forwarded"); len(forwarded) > 0 {
+		forwardedHeader, _, err := parseForwardedHeader(forwarded)
+		if err == nil {
+			if fproto := forwardedHeader["proto"]; len(fproto) > 0 {
+				scheme = fproto
+			}
+			if fhost := forwardedHeader["host"]; len(fhost) > 0 {
+				host = fhost
+			}
+		}
+	} else {
+		if forwardedProto := r.Header.Get("X-Forwarded-Proto"); len(forwardedProto) > 0 {
+			scheme = forwardedProto
+		}
+		if forwardedHost := r.Header.Get("X-Forwarded-Host"); len(forwardedHost) > 0 {
+			// According to the Apache mod_proxy docs, X-Forwarded-Host can be a
+			// comma-separated list of hosts, to which each proxy appends the
+			// requested host. We want to grab the first from this comma-separated
+			// list.
+			hosts := strings.SplitN(forwardedHost, ",", 2)
+			host = strings.TrimSpace(hosts[0])
+		}
+	}
+
+	basePath := routeDescriptorsMap[RouteNameBase].Path
+
+	requestPath := r.URL.Path
+	index := strings.Index(requestPath, basePath)
+
+	u := &url.URL{
+		Scheme: scheme,
+		Host:   host,
+	}
+
+	if index > 0 {
+		// N.B. index+1 is important because we want to include the trailing /
+		u.Path = requestPath[0 : index+1]
+	}
+
+	return NewURLBuilder(u, relative)
+}
+
+// BuildBaseURL constructs a base url for the API, typically just "/v2/".
+func (ub *URLBuilder) BuildBaseURL() (string, error) {
+	route := ub.cloneRoute(RouteNameBase)
+
+	baseURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return baseURL.String(), nil
+}
+
+// BuildCatalogURL constructs a url to get a catalog of repositories
+func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameCatalog)
+
+	catalogURL, err := route.URL()
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(catalogURL, values...).String(), nil
+}
+
+// BuildTagsURL constructs a url to list the tags in the named repository.
+func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameTags)
+
+	tagsURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return tagsURL.String(), nil
+}
+
+// BuildManifestURL constructs a url for the manifest identified by name and
+// reference. The argument reference may be either a tag or digest.
+func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) {
+	route := ub.cloneRoute(RouteNameManifest)
+
+	tagOrDigest := ""
+	switch v := ref.(type) {
+	case reference.Tagged:
+		tagOrDigest = v.Tag()
+	case reference.Digested:
+		tagOrDigest = v.Digest().String()
+	default:
+		return "", fmt.Errorf("reference must have a tag or digest")
+	}
+
+	manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest)
+	if err != nil {
+		return "", err
+	}
+
+	return manifestURL.String(), nil
+}
+
+// BuildBlobURL constructs the url for the blob identified by name and dgst.
+func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) {
+	route := ub.cloneRoute(RouteNameBlob)
+
+	layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String())
+	if err != nil {
+		return "", err
+	}
+
+	return layerURL.String(), nil
+}
+
+// BuildBlobUploadURL constructs a url to begin a blob upload in the
+// repository identified by name.
+func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUpload)
+
+	uploadURL, err := route.URL("name", name.Name())
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid,
+// including any url values. This should generally not be used by clients, as
+// this url is provided by server implementations during the blob upload
+// process.
+func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) {
+	route := ub.cloneRoute(RouteNameBlobUploadChunk)
+
+	uploadURL, err := route.URL("name", name.Name(), "uuid", uuid)
+	if err != nil {
+		return "", err
+	}
+
+	return appendValuesURL(uploadURL, values...).String(), nil
+}
+
+// cloneRoute returns a clone of the named route from the router. Routes
+// must be cloned to avoid modifying them during url generation.
+func (ub *URLBuilder) cloneRoute(name string) clonedRoute {
+	route := new(mux.Route)
+	root := new(url.URL)
+
+	*route = *ub.router.GetRoute(name) // clone the route
+	*root = *ub.root
+
+	return clonedRoute{Route: route, root: root, relative: ub.relative}
+}
+
+type clonedRoute struct {
+	*mux.Route
+	root     *url.URL
+	relative bool
+}
+
+func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) {
+	routeURL, err := cr.Route.URL(pairs...)
+	if err != nil {
+		return nil, err
+	}
+
+	if cr.relative {
+		return routeURL, nil
+	}
+
+	if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" {
+		routeURL.Path = routeURL.Path[1:]
+	}
+
+	url := cr.root.ResolveReference(routeURL)
+	url.Scheme = cr.root.Scheme
+	return url, nil
+}
+
+// appendValuesURL appends the parameters to the url.
+func appendValuesURL(u *url.URL, values ...url.Values) *url.URL {
+	merged := u.Query()
+
+	for _, v := range values {
+		for k, vv := range v {
+			merged[k] = append(merged[k], vv...)
+		}
+	}
+
+	u.RawQuery = merged.Encode()
+	return u
+}
+
+// appendValues appends the parameters to the url. Panics if the string is not
+// a url.
+func appendValues(u string, values ...url.Values) string {
+	up, err := url.Parse(u)
+
+	if err != nil {
+		panic(err) // should never happen
+	}
+
+	return appendValuesURL(up, values...).String()
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
new file mode 100644
index 0000000000..7d8f1d9576
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/api_version.go
@@ -0,0 +1,58 @@
+package auth
+
+import (
+	"net/http"
+	"strings"
+)
+
+// APIVersion represents a version of an API including its
+// type and version number.
+type APIVersion struct { + // Type refers to the name of a specific API specification + // such as "registry" + Type string + + // Version is the version of the API specification implemented, + // This may omit the revision number and only include + // the major and minor version, such as "2.0" + Version string +} + +// String returns the string formatted API Version +func (v APIVersion) String() string { + return v.Type + "/" + v.Version +} + +// APIVersions gets the API versions out of an HTTP response using the provided +// version header as the key for the HTTP header. +func APIVersions(resp *http.Response, versionHeader string) []APIVersion { + versions := []APIVersion{} + if versionHeader != "" { + for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { + for _, version := range strings.Fields(supportedVersions) { + versions = append(versions, ParseAPIVersion(version)) + } + } + } + return versions +} + +// ParseAPIVersion parses an API version string into an APIVersion +// Format (Expected, not enforced): +// API version string = '/' +// API type = [a-z][a-z0-9]* +// API version = [0-9]+(\.[0-9]+)? +// TODO(dmcgowan): Enforce format, add error condition, remove unknown type +func ParseAPIVersion(versionStr string) APIVersion { + idx := strings.IndexRune(versionStr, '/') + if idx == -1 { + return APIVersion{ + Type: "unknown", + Version: versionStr, + } + } + return APIVersion{ + Type: strings.ToLower(versionStr[:idx]), + Version: versionStr[idx+1:], + } +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go new file mode 100644 index 0000000000..2c3ebe1653 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/addr.go @@ -0,0 +1,27 @@ +package challenge + +import ( + "net/url" + "strings" +) + +// FROM: https://golang.org/src/net/http/http.go +// Given a string of the form "host", "host:port", or "[ipv6::address]:port", +// return true if the string includes a port. +func hasPort(s string) bool { return strings.LastIndex(s, ":") > strings.LastIndex(s, "]") } + +// FROM: http://golang.org/src/net/http/transport.go +var portMap = map[string]string{ + "http": "80", + "https": "443", +} + +// canonicalAddr returns url.Host but always with a ":port" suffix +// FROM: http://golang.org/src/net/http/transport.go +func canonicalAddr(url *url.URL) string { + addr := url.Host + if !hasPort(addr) { + return addr + ":" + portMap[url.Scheme] + } + return addr +} diff --git a/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go new file mode 100644 index 0000000000..6e3f1ccc41 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/auth/challenge/authchallenge.go @@ -0,0 +1,237 @@ +package challenge + +import ( + "fmt" + "net/http" + "net/url" + "strings" + "sync" +) + +// Challenge carries information from a WWW-Authenticate response header. +// See RFC 2617. +type Challenge struct { + // Scheme is the auth-scheme according to RFC 2617 + Scheme string + + // Parameters are the auth-params according to RFC 2617 + Parameters map[string]string +} + +// Manager manages the challenges for endpoints. +// The challenges are pulled out of HTTP responses. 
Only
+// responses which expect challenges should be added to
+// the manager, since a non-unauthorized request will be
+// viewed as not requiring challenges.
+type Manager interface {
+	// GetChallenges returns the challenges for the given
+	// endpoint URL.
+	GetChallenges(endpoint url.URL) ([]Challenge, error)
+
+	// AddResponse adds the response to the challenge
+	// manager. The challenges will be parsed out of
+	// the WWW-Authenticate headers and added to the
+	// URL which produced the response. If the
+	// response was authorized, any challenges for the
+	// endpoint will be cleared.
+	AddResponse(resp *http.Response) error
+}
+
+// NewSimpleManager returns an instance of
+// Manager which only maps endpoints to challenges
+// based on the responses which have been added to the
+// manager. The simple manager will make no attempt to
+// perform requests on the endpoints or cache the responses
+// to a backend.
+func NewSimpleManager() Manager {
+	return &simpleManager{
+		Challenges: make(map[string][]Challenge),
+	}
+}
+
+type simpleManager struct {
+	sync.RWMutex
+	Challenges map[string][]Challenge
+}
+
+func normalizeURL(endpoint *url.URL) {
+	endpoint.Host = strings.ToLower(endpoint.Host)
+	endpoint.Host = canonicalAddr(endpoint)
+}
+
+func (m *simpleManager) GetChallenges(endpoint url.URL) ([]Challenge, error) {
+	normalizeURL(&endpoint)
+
+	m.RLock()
+	defer m.RUnlock()
+	challenges := m.Challenges[endpoint.String()]
+	return challenges, nil
+}
+
+func (m *simpleManager) AddResponse(resp *http.Response) error {
+	challenges := ResponseChallenges(resp)
+	if resp.Request == nil {
+		return fmt.Errorf("missing request reference")
+	}
+	urlCopy := url.URL{
+		Path:   resp.Request.URL.Path,
+		Host:   resp.Request.URL.Host,
+		Scheme: resp.Request.URL.Scheme,
+	}
+	normalizeURL(&urlCopy)
+
+	m.Lock()
+	defer m.Unlock()
+	m.Challenges[urlCopy.String()] = challenges
+	return nil
+}
+
+// Octet types from RFC 2616.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+	isToken octetType = 1 << iota
+	isSpace
+)
+
+func init() {
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t octetType
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpace
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isToken
+		}
+		octetTypes[c] = t
+	}
+}
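
For illustration, a sketch of how the simple manager above records a 401 challenge and replays it (hypothetical endpoint values; error handling abbreviated):

	package main

	import (
		"fmt"
		"net/http"
		"net/url"

		"github.com/docker/distribution/registry/client/auth/challenge"
	)

	func main() {
		m := challenge.NewSimpleManager()

		// Simulate a 401 response carrying a bearer challenge from a registry.
		endpoint, _ := url.Parse("https://registry.example.com/v2/")
		resp := &http.Response{
			StatusCode: http.StatusUnauthorized,
			Header: http.Header{
				"Www-Authenticate": []string{`Bearer realm="https://auth.example.com/token",service="registry.example.com"`},
			},
			Request: &http.Request{URL: endpoint},
		}
		if err := m.AddResponse(resp); err != nil {
			panic(err)
		}

		// The stored challenge can be consulted before re-contacting the endpoint.
		cs, _ := m.GetChallenges(*endpoint)
		fmt.Println(cs[0].Scheme, cs[0].Parameters["realm"]) // bearer https://auth.example.com/token
	}
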
+		return parseAuthHeader(resp.Header)
+	}
+
+	return nil
+}
+
+func parseAuthHeader(header http.Header) []Challenge {
+	challenges := []Challenge{}
+	for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+		v, p := parseValueAndParams(h)
+		if v != "" {
+			challenges = append(challenges, Challenge{Scheme: v, Parameters: p})
+		}
+	}
+	return challenges
+}
+
+func parseValueAndParams(header string) (value string, params map[string]string) {
+	params = make(map[string]string)
+	value, s := expectToken(header)
+	if value == "" {
+		return
+	}
+	value = strings.ToLower(value)
+	s = "," + skipSpace(s)
+	for strings.HasPrefix(s, ",") {
+		var pkey string
+		pkey, s = expectToken(skipSpace(s[1:]))
+		if pkey == "" {
+			return
+		}
+		if !strings.HasPrefix(s, "=") {
+			return
+		}
+		var pvalue string
+		pvalue, s = expectTokenOrQuoted(s[1:])
+		if pvalue == "" {
+			return
+		}
+		pkey = strings.ToLower(pkey)
+		params[pkey] = pvalue
+		s = skipSpace(s)
+	}
+	return
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpace == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isToken == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return expectToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
}
diff --git a/vendor/github.com/docker/distribution/registry/client/auth/session.go b/vendor/github.com/docker/distribution/registry/client/auth/session.go
new file mode 100644
index 0000000000..aad8a0e6f5
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/auth/session.go
@@ -0,0 +1,530 @@
+package auth
+
+import (
+	"encoding/json"
+	"errors"
+	"fmt"
+	"net/http"
+	"net/url"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/docker/distribution/registry/client"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/docker/distribution/registry/client/transport"
+)
+
+var (
+	// ErrNoBasicAuthCredentials is returned if a request can't be authorized with
+	// basic auth due to lack of credentials.
+	ErrNoBasicAuthCredentials = errors.New("no basic auth credentials")
+
+	// ErrNoToken is returned if a request is successful but the body does not
+	// contain an authorization token.
+	ErrNoToken = errors.New("authorization server did not include a token in the response")
+)
+
+const defaultClientID = "registry-client"
+
+// AuthenticationHandler is an interface for authorizing a request using
+// params from a "WWW-Authenticate" header for a single scheme.
+type AuthenticationHandler interface {
+	// Scheme returns the scheme as expected from the "WWW-Authenticate" header.
+	Scheme() string
+
+	// AuthorizeRequest adds the authorization header to a request (if needed)
+	// using the parameters from "WWW-Authenticate" method. The parameter
+	// values depend on the scheme.
+ AuthorizeRequest(req *http.Request, params map[string]string) error +} + +// CredentialStore is an interface for getting credentials for +// a given URL +type CredentialStore interface { + // Basic returns basic auth for the given URL + Basic(*url.URL) (string, string) + + // RefreshToken returns a refresh token for the + // given URL and service + RefreshToken(*url.URL, string) string + + // SetRefreshToken sets the refresh token if none + // is provided for the given url and service + SetRefreshToken(realm *url.URL, service, token string) +} + +// NewAuthorizer creates an authorizer which can handle multiple authentication +// schemes. The handlers are tried in order, the higher priority authentication +// methods should be first. The challengeMap holds a list of challenges for +// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). +func NewAuthorizer(manager challenge.Manager, handlers ...AuthenticationHandler) transport.RequestModifier { + return &endpointAuthorizer{ + challenges: manager, + handlers: handlers, + } +} + +type endpointAuthorizer struct { + challenges challenge.Manager + handlers []AuthenticationHandler +} + +func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { + pingPath := req.URL.Path + if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { + pingPath = pingPath[:v2Root+4] + } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { + pingPath = pingPath[:v1Root] + "/v2/" + } else { + return nil + } + + ping := url.URL{ + Host: req.URL.Host, + Scheme: req.URL.Scheme, + Path: pingPath, + } + + challenges, err := ea.challenges.GetChallenges(ping) + if err != nil { + return err + } + + if len(challenges) > 0 { + for _, handler := range ea.handlers { + for _, c := range challenges { + if c.Scheme != handler.Scheme() { + continue + } + if err := handler.AuthorizeRequest(req, c.Parameters); err != nil { + return err + } + } + } + } + + return nil +} + +// This is the minimum duration a token can last (in seconds). +// A token must not live less than 60 seconds because older versions +// of the Docker client didn't read their expiration from the token +// response and assumed 60 seconds. So to remain compatible with +// those implementations, a token must live at least this long. +const minimumTokenLifetimeSeconds = 60 + +// Private interface for time used by this package to enable tests to provide their own implementation. +type clock interface { + Now() time.Time +} + +type tokenHandler struct { + creds CredentialStore + transport http.RoundTripper + clock clock + + offlineAccess bool + forceOAuth bool + clientID string + scopes []Scope + + tokenLock sync.Mutex + tokenCache string + tokenExpiration time.Time + + logger Logger +} + +// Scope is a type which is serializable to a string +// using the allow scope grammar. +type Scope interface { + String() string +} + +// RepositoryScope represents a token scope for access +// to a repository. +type RepositoryScope struct { + Repository string + Class string + Actions []string +} + +// String returns the string representation of the repository +// using the scope grammar +func (rs RepositoryScope) String() string { + repoType := "repository" + // Keep existing format for image class to maintain backwards compatibility + // with authorization servers which do not support the expanded grammar. 
+	if rs.Class != "" && rs.Class != "image" {
+		repoType = fmt.Sprintf("%s(%s)", repoType, rs.Class)
+	}
+	return fmt.Sprintf("%s:%s:%s", repoType, rs.Repository, strings.Join(rs.Actions, ","))
+}
+
+// RegistryScope represents a token scope for access
+// to resources in the registry.
+type RegistryScope struct {
+	Name    string
+	Actions []string
+}
+
+// String returns the string representation of the scope
+// using the scope grammar
+func (rs RegistryScope) String() string {
+	return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ","))
+}
+
+// Logger defines the injectable logging interface, used on TokenHandlers.
+type Logger interface {
+	Debugf(format string, args ...interface{})
+}
+
+func logDebugf(logger Logger, format string, args ...interface{}) {
+	if logger == nil {
+		return
+	}
+	logger.Debugf(format, args...)
+}
+
+// TokenHandlerOptions is used to configure a new token handler
+type TokenHandlerOptions struct {
+	Transport   http.RoundTripper
+	Credentials CredentialStore
+
+	OfflineAccess bool
+	ForceOAuth    bool
+	ClientID      string
+	Scopes        []Scope
+	Logger        Logger
+}
+
+// An implementation of clock for providing real time data.
+type realClock struct{}
+
+// Now implements clock
+func (realClock) Now() time.Time { return time.Now() }
+
+// NewTokenHandler creates a new AuthenticationHandler which supports
+// fetching tokens from a remote token server.
+func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler {
+	// Create options...
+	return NewTokenHandlerWithOptions(TokenHandlerOptions{
+		Transport:   transport,
+		Credentials: creds,
+		Scopes: []Scope{
+			RepositoryScope{
+				Repository: scope,
+				Actions:    actions,
+			},
+		},
+	})
+}
+
+// NewTokenHandlerWithOptions creates a new token handler using the provided
+// options structure.
+func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler {
+	handler := &tokenHandler{
+		transport:     options.Transport,
+		creds:         options.Credentials,
+		offlineAccess: options.OfflineAccess,
+		forceOAuth:    options.ForceOAuth,
+		clientID:      options.ClientID,
+		scopes:        options.Scopes,
+		clock:         realClock{},
+		logger:        options.Logger,
+	}
+
+	return handler
+}
+
+func (th *tokenHandler) client() *http.Client {
+	return &http.Client{
+		Transport: th.transport,
+		Timeout:   15 * time.Second,
+	}
+}
+
+func (th *tokenHandler) Scheme() string {
+	return "bearer"
+}
+
+func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error {
+	var additionalScopes []string
+	if fromParam := req.URL.Query().Get("from"); fromParam != "" {
+		additionalScopes = append(additionalScopes, RepositoryScope{
+			Repository: fromParam,
+			Actions:    []string{"pull"},
+		}.String())
+	}
+
+	token, err := th.getToken(params, additionalScopes...)
+ if err != nil { + return err + } + + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) + + return nil +} + +func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { + th.tokenLock.Lock() + defer th.tokenLock.Unlock() + scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) + for _, scope := range th.scopes { + scopes = append(scopes, scope.String()) + } + var addedScopes bool + for _, scope := range additionalScopes { + if hasScope(scopes, scope) { + continue + } + scopes = append(scopes, scope) + addedScopes = true + } + + now := th.clock.Now() + if now.After(th.tokenExpiration) || addedScopes { + token, expiration, err := th.fetchToken(params, scopes) + if err != nil { + return "", err + } + + // do not update cache for added scope tokens + if !addedScopes { + th.tokenCache = token + th.tokenExpiration = expiration + } + + return token, nil + } + + return th.tokenCache, nil +} + +func hasScope(scopes []string, scope string) bool { + for _, s := range scopes { + if s == scope { + return true + } + } + return false +} + +type postTokenResponse struct { + AccessToken string `json:"access_token"` + RefreshToken string `json:"refresh_token"` + ExpiresIn int `json:"expires_in"` + IssuedAt time.Time `json:"issued_at"` + Scope string `json:"scope"` +} + +func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { + form := url.Values{} + form.Set("scope", strings.Join(scopes, " ")) + form.Set("service", service) + + clientID := th.clientID + if clientID == "" { + // Use default client, this is a required field + clientID = defaultClientID + } + form.Set("client_id", clientID) + + if refreshToken != "" { + form.Set("grant_type", "refresh_token") + form.Set("refresh_token", refreshToken) + } else if th.creds != nil { + form.Set("grant_type", "password") + username, password := th.creds.Basic(realm) + form.Set("username", username) + form.Set("password", password) + + // attempt to get a refresh token + form.Set("access_type", "offline") + } else { + // refuse to do oauth without a grant type + return "", time.Time{}, fmt.Errorf("no supported grant type") + } + + resp, err := th.client().PostForm(realm.String(), form) + if err != nil { + return "", time.Time{}, err + } + defer resp.Body.Close() + + if !client.SuccessStatus(resp.StatusCode) { + err := client.HandleErrorResponse(resp) + return "", time.Time{}, err + } + + decoder := json.NewDecoder(resp.Body) + + var tr postTokenResponse + if err = decoder.Decode(&tr); err != nil { + return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) + } + + if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { + th.creds.SetRefreshToken(realm, service, tr.RefreshToken) + } + + if tr.ExpiresIn < minimumTokenLifetimeSeconds { + // The default/minimum lifetime. + tr.ExpiresIn = minimumTokenLifetimeSeconds + logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn) + } + + if tr.IssuedAt.IsZero() { + // issued_at is optional in the token response. 
+		tr.IssuedAt = th.clock.Now().UTC()
+	}
+
+	return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
+}
+
+type getTokenResponse struct {
+	Token        string    `json:"token"`
+	AccessToken  string    `json:"access_token"`
+	ExpiresIn    int       `json:"expires_in"`
+	IssuedAt     time.Time `json:"issued_at"`
+	RefreshToken string    `json:"refresh_token"`
+}
+
+func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) {
+
+	req, err := http.NewRequest("GET", realm.String(), nil)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+
+	reqParams := req.URL.Query()
+
+	if service != "" {
+		reqParams.Add("service", service)
+	}
+
+	for _, scope := range scopes {
+		reqParams.Add("scope", scope)
+	}
+
+	if th.offlineAccess {
+		reqParams.Add("offline_token", "true")
+		clientID := th.clientID
+		if clientID == "" {
+			clientID = defaultClientID
+		}
+		reqParams.Add("client_id", clientID)
+	}
+
+	if th.creds != nil {
+		username, password := th.creds.Basic(realm)
+		if username != "" && password != "" {
+			reqParams.Add("account", username)
+			req.SetBasicAuth(username, password)
+		}
+	}
+
+	req.URL.RawQuery = reqParams.Encode()
+
+	resp, err := th.client().Do(req)
+	if err != nil {
+		return "", time.Time{}, err
+	}
+	defer resp.Body.Close()
+
+	if !client.SuccessStatus(resp.StatusCode) {
+		err := client.HandleErrorResponse(resp)
+		return "", time.Time{}, err
+	}
+
+	decoder := json.NewDecoder(resp.Body)
+
+	var tr getTokenResponse
+	if err = decoder.Decode(&tr); err != nil {
+		return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err)
+	}
+
+	if tr.RefreshToken != "" && th.creds != nil {
+		th.creds.SetRefreshToken(realm, service, tr.RefreshToken)
+	}
+
+	// `access_token` is equivalent to `token` and if both are specified
+	// the choice is undefined. Canonicalize `access_token` by sticking
+	// things in `token`.
+	if tr.AccessToken != "" {
+		tr.Token = tr.AccessToken
+	}
+
+	if tr.Token == "" {
+		return "", time.Time{}, ErrNoToken
+	}
+
+	if tr.ExpiresIn < minimumTokenLifetimeSeconds {
+		// The default/minimum lifetime.
+		tr.ExpiresIn = minimumTokenLifetimeSeconds
+		logDebugf(th.logger, "Increasing token expiration to: %d seconds", tr.ExpiresIn)
+	}
+
+	if tr.IssuedAt.IsZero() {
+		// issued_at is optional in the token response.
+		tr.IssuedAt = th.clock.Now().UTC()
+	}
+
+	return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil
+}
+
+func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) {
+	realm, ok := params["realm"]
+	if !ok {
+		return "", time.Time{}, errors.New("no realm specified for token auth challenge")
+	}
+
+	// TODO(dmcgowan): Handle empty scheme and relative realm
+	realmURL, err := url.Parse(realm)
+	if err != nil {
+		return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err)
+	}
+
+	service := params["service"]
+
+	var refreshToken string
+
+	if th.creds != nil {
+		refreshToken = th.creds.RefreshToken(realmURL, service)
+	}
+
+	if refreshToken != "" || th.forceOAuth {
+		return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes)
+	}
+
+	return th.fetchTokenWithBasicAuth(realmURL, service, scopes)
+}
+
+type basicHandler struct {
+	creds CredentialStore
+}
+
+// NewBasicHandler creates a new authentication handler which adds
+// basic authentication credentials to a request.
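In practice the basic and bearer handlers are composed behind a single authorizer and transport. A minimal wiring sketch; creds (a CredentialStore implementation), the repository name, and the actions are assumed placeholders, and pingResp stands for a prior 401 response from the registry's /v2/ endpoint:

cm := challenge.NewSimpleManager()
if err := cm.AddResponse(pingResp); err != nil { // record the registry's challenge
	return err
}
authorizer := NewAuthorizer(cm,
	NewTokenHandler(http.DefaultTransport, creds, "library/alpine", "pull"),
	NewBasicHandler(creds))
// Requests sent through this client now carry the appropriate Authorization header.
httpClient := &http.Client{Transport: transport.NewTransport(http.DefaultTransport, authorizer)}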
+func NewBasicHandler(creds CredentialStore) AuthenticationHandler { + return &basicHandler{ + creds: creds, + } +} + +func (*basicHandler) Scheme() string { + return "basic" +} + +func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { + if bh.creds != nil { + username, password := bh.creds.Basic(req.URL) + if username != "" && password != "" { + req.SetBasicAuth(username, password) + return nil + } + } + return ErrNoBasicAuthCredentials +} diff --git a/vendor/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/github.com/docker/distribution/registry/client/blob_writer.go new file mode 100644 index 0000000000..695bf852f1 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/blob_writer.go @@ -0,0 +1,162 @@ +package client + +import ( + "bytes" + "context" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/docker/distribution" +) + +type httpBlobUpload struct { + statter distribution.BlobStatter + client *http.Client + + uuid string + startedAt time.Time + + location string // always the last value of the location header. + offset int64 + closed bool +} + +func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { + panic("Not implemented") +} + +func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUploadUnknown + } + return HandleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { + req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) + if err != nil { + return 0, err + } + defer req.Body.Close() + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int64 + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { + req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) + if err != nil { + return 0, err + } + req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) + req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) + req.Header.Set("Content-Type", "application/octet-stream") + + resp, err := hbu.client.Do(req) + if err != nil { + return 0, err + } + + if !SuccessStatus(resp.StatusCode) { + return 0, hbu.handleErrorResponse(resp) + } + + hbu.uuid = resp.Header.Get("Docker-Upload-UUID") + hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) + if err != nil { + return 0, err + } + rng := resp.Header.Get("Range") + var start, end int + if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { + return 0, err + } else if n != 2 || end < start { + return 0, fmt.Errorf("bad range format: %s", rng) + } + + return (end - start + 1), nil + +} + +func (hbu *httpBlobUpload) Size() int64 { + return hbu.offset +} + +func (hbu *httpBlobUpload) ID() string { + return hbu.uuid +} + +func (hbu *httpBlobUpload) StartedAt() time.Time { + return hbu.startedAt +} + +func (hbu 
*httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { + // TODO(dmcgowan): Check if already finished, if so just fetch + req, err := http.NewRequest("PUT", hbu.location, nil) + if err != nil { + return distribution.Descriptor{}, err + } + + values := req.URL.Query() + values.Set("digest", desc.Digest.String()) + req.URL.RawQuery = values.Encode() + + resp, err := hbu.client.Do(req) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if !SuccessStatus(resp.StatusCode) { + return distribution.Descriptor{}, hbu.handleErrorResponse(resp) + } + + return hbu.statter.Stat(ctx, desc.Digest) +} + +func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { + req, err := http.NewRequest("DELETE", hbu.location, nil) + if err != nil { + return err + } + resp, err := hbu.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { + return nil + } + return hbu.handleErrorResponse(resp) +} + +func (hbu *httpBlobUpload) Close() error { + hbu.closed = true + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/errors.go b/vendor/github.com/docker/distribution/registry/client/errors.go new file mode 100644 index 0000000000..52d49d5d29 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/errors.go @@ -0,0 +1,139 @@ +package client + +import ( + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/distribution/registry/client/auth/challenge" +) + +// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty +// errcode.Errors slice. +var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") + +// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is +// returned when making a registry api call. +type UnexpectedHTTPStatusError struct { + Status string +} + +func (e *UnexpectedHTTPStatusError) Error() string { + return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) +} + +// UnexpectedHTTPResponseError is returned when an expected HTTP status code +// is returned, but the content was unexpected and failed to be parsed. +type UnexpectedHTTPResponseError struct { + ParseErr error + StatusCode int + Response []byte +} + +func (e *UnexpectedHTTPResponseError) Error() string { + return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) +} + +func parseHTTPErrorResponse(statusCode int, r io.Reader) error { + var errors errcode.Errors + body, err := ioutil.ReadAll(r) + if err != nil { + return err + } + + // For backward compatibility, handle irregularly formatted + // messages that contain a "details" field. 
+	var detailsErr struct {
+		Details string `json:"details"`
+	}
+	err = json.Unmarshal(body, &detailsErr)
+	if err == nil && detailsErr.Details != "" {
+		switch statusCode {
+		case http.StatusUnauthorized:
+			return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details)
+		case http.StatusTooManyRequests:
+			return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details)
+		default:
+			return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details)
+		}
+	}
+
+	if err := json.Unmarshal(body, &errors); err != nil {
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   err,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	if len(errors) == 0 {
+		// If there was no error specified in the body, return
+		// UnexpectedHTTPResponseError.
+		return &UnexpectedHTTPResponseError{
+			ParseErr:   ErrNoErrorsInBody,
+			StatusCode: statusCode,
+			Response:   body,
+		}
+	}
+
+	return errors
+}
+
+func makeErrorList(err error) []error {
+	if errL, ok := err.(errcode.Errors); ok {
+		return []error(errL)
+	}
+	return []error{err}
+}
+
+func mergeErrors(err1, err2 error) error {
+	return errcode.Errors(append(makeErrorList(err1), makeErrorList(err2)...))
+}
+
+// HandleErrorResponse returns error parsed from HTTP response for an
+// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An
+// UnexpectedHTTPStatusError is returned for response codes outside of the
+// expected range.
+func HandleErrorResponse(resp *http.Response) error {
+	if resp.StatusCode >= 400 && resp.StatusCode < 500 {
+		// Check for OAuth errors within the `WWW-Authenticate` header first
+		// See https://tools.ietf.org/html/rfc6750#section-3
+		for _, c := range challenge.ResponseChallenges(resp) {
+			if c.Scheme == "bearer" {
+				var err errcode.Error
+				// codes defined at https://tools.ietf.org/html/rfc6750#section-3.1
+				switch c.Parameters["error"] {
+				case "invalid_token":
+					err.Code = errcode.ErrorCodeUnauthorized
+				case "insufficient_scope":
+					err.Code = errcode.ErrorCodeDenied
+				default:
+					continue
+				}
+				if description := c.Parameters["error_description"]; description != "" {
+					err.Message = description
+				} else {
+					err.Message = err.Code.Message()
+				}
+
+				return mergeErrors(err, parseHTTPErrorResponse(resp.StatusCode, resp.Body))
+			}
+		}
+		err := parseHTTPErrorResponse(resp.StatusCode, resp.Body)
+		if uErr, ok := err.(*UnexpectedHTTPResponseError); ok && resp.StatusCode == 401 {
+			return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response)
+		}
+		return err
+	}
+	return &UnexpectedHTTPStatusError{Status: resp.Status}
+}
+
+// SuccessStatus returns true if the argument is a successful HTTP response
+// code (in the range 200 - 399 inclusive).
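Callers pair SuccessStatus (defined next) with HandleErrorResponse in the pattern used throughout this package; a condensed sketch, with httpClient and manifestURL as assumed placeholders:

resp, err := httpClient.Get(manifestURL)
if err != nil {
	return err
}
defer resp.Body.Close()
if !SuccessStatus(resp.StatusCode) {
	// Yields errcode.Errors parsed from the body, or an Unexpected* error.
	return HandleErrorResponse(resp)
}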
+func SuccessStatus(status int) bool {
+	return status >= 200 && status <= 399
+}
diff --git a/vendor/github.com/docker/distribution/registry/client/repository.go b/vendor/github.com/docker/distribution/registry/client/repository.go
new file mode 100644
index 0000000000..aa442e6540
--- /dev/null
+++ b/vendor/github.com/docker/distribution/registry/client/repository.go
@@ -0,0 +1,867 @@
+package client
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution"
+	"github.com/docker/distribution/reference"
+	"github.com/docker/distribution/registry/api/v2"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/distribution/registry/storage/cache"
+	"github.com/docker/distribution/registry/storage/cache/memory"
+	"github.com/opencontainers/go-digest"
+)
+
+// Registry provides an interface for calling Repositories, which returns a catalog of repositories.
+type Registry interface {
+	Repositories(ctx context.Context, repos []string, last string) (n int, err error)
+}
+
+// checkHTTPRedirect is a callback that can manipulate redirected HTTP
+// requests. It is used to preserve Accept and Range headers.
+func checkHTTPRedirect(req *http.Request, via []*http.Request) error {
+	if len(via) >= 10 {
+		return errors.New("stopped after 10 redirects")
+	}
+
+	if len(via) > 0 {
+		for headerName, headerVals := range via[0].Header {
+			if headerName != "Accept" && headerName != "Range" {
+				continue
+			}
+			for _, val := range headerVals {
+				// Don't add to redirected request if redirected
+				// request already has a header with the same
+				// name and value.
+				hasValue := false
+				for _, existingVal := range req.Header[headerName] {
+					if existingVal == val {
+						hasValue = true
+						break
+					}
+				}
+				if !hasValue {
+					req.Header.Add(headerName, val)
+				}
+			}
+		}
+	}
+
+	return nil
+}
+
+// NewRegistry creates a registry namespace which can be used to get a listing of repositories
+func NewRegistry(baseURL string, transport http.RoundTripper) (Registry, error) {
+	ub, err := v2.NewURLBuilderFromString(baseURL, false)
+	if err != nil {
+		return nil, err
+	}
+
+	client := &http.Client{
+		Transport:     transport,
+		Timeout:       1 * time.Minute,
+		CheckRedirect: checkHTTPRedirect,
+	}
+
+	return &registry{
+		client: client,
+		ub:     ub,
+	}, nil
+}
+
+type registry struct {
+	client *http.Client
+	ub     *v2.URLBuilder
+}
+
+// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size
+// of the slice, starting at the value provided in 'last'.
The number of entries will be returned along with io.EOF if there +// are no more entries +func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { + var numFilled int + var returnErr error + + values := buildCatalogValues(len(entries), last) + u, err := r.ub.BuildCatalogURL(values) + if err != nil { + return 0, err + } + + resp, err := r.client.Get(u) + if err != nil { + return 0, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + var ctlg struct { + Repositories []string `json:"repositories"` + } + decoder := json.NewDecoder(resp.Body) + + if err := decoder.Decode(&ctlg); err != nil { + return 0, err + } + + for cnt := range ctlg.Repositories { + entries[cnt] = ctlg.Repositories[cnt] + } + numFilled = len(ctlg.Repositories) + + link := resp.Header.Get("Link") + if link == "" { + returnErr = io.EOF + } + } else { + return 0, HandleErrorResponse(resp) + } + + return numFilled, returnErr +} + +// NewRepository creates a new Repository for the given repository name and base URL. +func NewRepository(name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { + ub, err := v2.NewURLBuilderFromString(baseURL, false) + if err != nil { + return nil, err + } + + client := &http.Client{ + Transport: transport, + CheckRedirect: checkHTTPRedirect, + // TODO(dmcgowan): create cookie jar + } + + return &repository{ + client: client, + ub: ub, + name: name, + }, nil +} + +type repository struct { + client *http.Client + ub *v2.URLBuilder + name reference.Named +} + +func (r *repository) Named() reference.Named { + return r.name +} + +func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { + statter := &blobStatter{ + name: r.name, + ub: r.ub, + client: r.client, + } + return &blobs{ + name: r.name, + ub: r.ub, + client: r.client, + statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), + } +} + +func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { + // todo(richardscothern): options should be sent over the wire + return &manifests{ + name: r.name, + ub: r.ub, + client: r.client, + etags: make(map[string]string), + }, nil +} + +func (r *repository) Tags(ctx context.Context) distribution.TagService { + return &tags{ + client: r.client, + ub: r.ub, + name: r.Named(), + } +} + +// tags implements remote tagging operations. +type tags struct { + client *http.Client + ub *v2.URLBuilder + name reference.Named +} + +// All returns all tags +func (t *tags) All(ctx context.Context) ([]string, error) { + var tags []string + + listURLStr, err := t.ub.BuildTagsURL(t.name) + if err != nil { + return tags, err + } + + listURL, err := url.Parse(listURLStr) + if err != nil { + return tags, err + } + + for { + resp, err := t.client.Get(listURL.String()) + if err != nil { + return tags, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + return tags, err + } + + tagsResponse := struct { + Tags []string `json:"tags"` + }{} + if err := json.Unmarshal(b, &tagsResponse); err != nil { + return tags, err + } + tags = append(tags, tagsResponse.Tags...) 
+ if link := resp.Header.Get("Link"); link != "" { + linkURLStr := strings.Trim(strings.Split(link, ";")[0], "<>") + linkURL, err := url.Parse(linkURLStr) + if err != nil { + return tags, err + } + + listURL = listURL.ResolveReference(linkURL) + } else { + return tags, nil + } + } else { + return tags, HandleErrorResponse(resp) + } + } +} + +func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { + desc := distribution.Descriptor{} + headers := response.Header + + ctHeader := headers.Get("Content-Type") + if ctHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") + } + desc.MediaType = ctHeader + + digestHeader := headers.Get("Docker-Content-Digest") + if digestHeader == "" { + bytes, err := ioutil.ReadAll(response.Body) + if err != nil { + return distribution.Descriptor{}, err + } + _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) + if err != nil { + return distribution.Descriptor{}, err + } + return desc, nil + } + + dgst, err := digest.Parse(digestHeader) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Digest = dgst + + lengthHeader := headers.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") + } + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, err + } + desc.Size = length + + return desc, nil + +} + +// Get issues a HEAD request for a Manifest against its named endpoint in order +// to construct a descriptor for the tag. If the registry doesn't support HEADing +// a manifest, fallback to GET. +func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { + ref, err := reference.WithTag(t.name, tag) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := t.ub.BuildManifestURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + newRequest := func(method string) (*http.Response, error) { + req, err := http.NewRequest(method, u, nil) + if err != nil { + return nil, err + } + + for _, t := range distribution.ManifestMediaTypes() { + req.Header.Add("Accept", t) + } + resp, err := t.client.Do(req) + return resp, err + } + + resp, err := newRequest("HEAD") + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + switch { + case resp.StatusCode >= 200 && resp.StatusCode < 400 && len(resp.Header.Get("Docker-Content-Digest")) > 0: + // if the response is a success AND a Docker-Content-Digest can be retrieved from the headers + return descriptorFromResponse(resp) + default: + // if the response is an error - there will be no body to decode. 
+		// Issue a GET request:
+		//   - for data from a server that does not handle HEAD
+		//   - to get error details in case of a failure
+		resp, err = newRequest("GET")
+		if err != nil {
+			return distribution.Descriptor{}, err
+		}
+		defer resp.Body.Close()
+
+		if resp.StatusCode >= 200 && resp.StatusCode < 400 {
+			return descriptorFromResponse(resp)
+		}
+		return distribution.Descriptor{}, HandleErrorResponse(resp)
+	}
+}
+
+func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) {
+	panic("not implemented")
+}
+
+func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error {
+	panic("not implemented")
+}
+
+func (t *tags) Untag(ctx context.Context, tag string) error {
+	panic("not implemented")
+}
+
+type manifests struct {
+	name   reference.Named
+	ub     *v2.URLBuilder
+	client *http.Client
+	etags  map[string]string
+}
+
+func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) {
+	ref, err := reference.WithDigest(ms.name, dgst)
+	if err != nil {
+		return false, err
+	}
+	u, err := ms.ub.BuildManifestURL(ref)
+	if err != nil {
+		return false, err
+	}
+
+	resp, err := ms.client.Head(u)
+	if err != nil {
+		return false, err
+	}
+
+	if SuccessStatus(resp.StatusCode) {
+		return true, nil
+	} else if resp.StatusCode == http.StatusNotFound {
+		return false, nil
+	}
+	return false, HandleErrorResponse(resp)
+}
+
+// AddEtagToTag allows a client to supply an eTag to Get which will be
+// used for a conditional HTTP request. If the eTag matches, a nil manifest
+// and ErrManifestNotModified error will be returned. etag is automatically
+// quoted when added to this map.
+func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption {
+	return etagOption{tag, etag}
+}
+
+type etagOption struct{ tag, etag string }
+
+func (o etagOption) Apply(ms distribution.ManifestService) error {
+	if ms, ok := ms.(*manifests); ok {
+		ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag)
+		return nil
+	}
+	return fmt.Errorf("etag options is a client-only option")
+}
+
+// ReturnContentDigest allows a client to set the content digest on
+// a successful request from the 'Docker-Content-Digest' header. The
+// returned digest represents the digest which the registry uses
+// to refer to the content and can be used to delete the content.
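For example, a caller that also wants the registry's canonical digest for a tag can combine this option with a tag option; a sketch, assuming ms is a ManifestService from Manifests above and distribution.WithTag from the parent package:

var dgst digest.Digest
m, err := ms.Get(ctx, "", distribution.WithTag("latest"), ReturnContentDigest(&dgst))
if err != nil {
	return err
}
// dgst now holds the Docker-Content-Digest value and can be passed to Delete.
_ = m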
+func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { + return contentDigestOption{dgst} +} + +type contentDigestOption struct{ digest *digest.Digest } + +func (o contentDigestOption) Apply(ms distribution.ManifestService) error { + return nil +} + +func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { + var ( + digestOrTag string + ref reference.Named + err error + contentDgst *digest.Digest + mediaTypes []string + ) + + for _, option := range options { + switch opt := option.(type) { + case distribution.WithTagOption: + digestOrTag = opt.Tag + ref, err = reference.WithTag(ms.name, opt.Tag) + if err != nil { + return nil, err + } + case contentDigestOption: + contentDgst = opt.digest + case distribution.WithManifestMediaTypesOption: + mediaTypes = opt.MediaTypes + default: + err := option.Apply(ms) + if err != nil { + return nil, err + } + } + } + + if digestOrTag == "" { + digestOrTag = dgst.String() + ref, err = reference.WithDigest(ms.name, dgst) + if err != nil { + return nil, err + } + } + + if len(mediaTypes) == 0 { + mediaTypes = distribution.ManifestMediaTypes() + } + + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return nil, err + } + + req, err := http.NewRequest("GET", u, nil) + if err != nil { + return nil, err + } + + for _, t := range mediaTypes { + req.Header.Add("Accept", t) + } + + if _, ok := ms.etags[digestOrTag]; ok { + req.Header.Set("If-None-Match", ms.etags[digestOrTag]) + } + + resp, err := ms.client.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + if resp.StatusCode == http.StatusNotModified { + return nil, distribution.ErrManifestNotModified + } else if SuccessStatus(resp.StatusCode) { + if contentDgst != nil { + dgst, err := digest.Parse(resp.Header.Get("Docker-Content-Digest")) + if err == nil { + *contentDgst = dgst + } + } + mt := resp.Header.Get("Content-Type") + body, err := ioutil.ReadAll(resp.Body) + + if err != nil { + return nil, err + } + m, _, err := distribution.UnmarshalManifest(mt, body) + if err != nil { + return nil, err + } + return m, nil + } + return nil, HandleErrorResponse(resp) +} + +// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the +// tag name in order to build the correct upload URL. 
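A tagged push then reduces to a single call; illustrative only, assuming m is an already-built distribution.Manifest:

dgst, err := ms.Put(ctx, m, distribution.WithTag("v1.0.0"))
if err != nil {
	return err
}
// dgst is the canonical digest the registry assigned to the manifest.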
+func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { + ref := ms.name + var tagged bool + + for _, option := range options { + if opt, ok := option.(distribution.WithTagOption); ok { + var err error + ref, err = reference.WithTag(ref, opt.Tag) + if err != nil { + return "", err + } + tagged = true + } else { + err := option.Apply(ms) + if err != nil { + return "", err + } + } + } + mediaType, p, err := m.Payload() + if err != nil { + return "", err + } + + if !tagged { + // generate a canonical digest and Put by digest + _, d, err := distribution.UnmarshalManifest(mediaType, p) + if err != nil { + return "", err + } + ref, err = reference.WithDigest(ref, d.Digest) + if err != nil { + return "", err + } + } + + manifestURL, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return "", err + } + + putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) + if err != nil { + return "", err + } + + putRequest.Header.Set("Content-Type", mediaType) + + resp, err := ms.client.Do(putRequest) + if err != nil { + return "", err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + dgstHeader := resp.Header.Get("Docker-Content-Digest") + dgst, err := digest.Parse(dgstHeader) + if err != nil { + return "", err + } + + return dgst, nil + } + + return "", HandleErrorResponse(resp) +} + +func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(ms.name, dgst) + if err != nil { + return err + } + u, err := ms.ub.BuildManifestURL(ref) + if err != nil { + return err + } + req, err := http.NewRequest("DELETE", u, nil) + if err != nil { + return err + } + + resp, err := ms.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +// todo(richardscothern): Restore interface and implementation with merge of #1050 +/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { + panic("not supported") +}*/ + +type blobs struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client + + statter distribution.BlobDescriptorService + distribution.BlobDeleter +} + +func sanitizeLocation(location, base string) (string, error) { + baseURL, err := url.Parse(base) + if err != nil { + return "", err + } + + locationURL, err := url.Parse(location) + if err != nil { + return "", err + } + + return baseURL.ResolveReference(locationURL).String(), nil +} + +func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return bs.statter.Stat(ctx, dgst) + +} + +func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { + reader, err := bs.Open(ctx, dgst) + if err != nil { + return nil, err + } + defer reader.Close() + + return ioutil.ReadAll(reader) +} + +func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return nil, err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return nil, err + } + + return transport.NewHTTPReadSeeker(bs.client, blobURL, + func(resp *http.Response) error { + if resp.StatusCode == http.StatusNotFound { + return distribution.ErrBlobUnknown + } + return HandleErrorResponse(resp) + }), nil +} + +func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { + panic("not implemented") +} + +func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { + writer, err := bs.Create(ctx) + if err != nil { + return distribution.Descriptor{}, err + } + dgstr := digest.Canonical.Digester() + n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) + if err != nil { + return distribution.Descriptor{}, err + } + if n < int64(len(p)) { + return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) + } + + desc := distribution.Descriptor{ + MediaType: mediaType, + Size: int64(len(p)), + Digest: dgstr.Digest(), + } + + return writer.Commit(ctx, desc) +} + +type optionFunc func(interface{}) error + +func (f optionFunc) Apply(v interface{}) error { + return f(v) +} + +// WithMountFrom returns a BlobCreateOption which designates that the blob should be +// mounted from the given canonical reference. +func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { + return optionFunc(func(v interface{}) error { + opts, ok := v.(*distribution.CreateOptions) + if !ok { + return fmt.Errorf("unexpected options type: %T", v) + } + + opts.Mount.ShouldMount = true + opts.Mount.From = ref + + return nil + }) +} + +func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { + var opts distribution.CreateOptions + + for _, option := range options { + err := option.Apply(&opts) + if err != nil { + return nil, err + } + } + + var values []url.Values + + if opts.Mount.ShouldMount { + values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) + } + + u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
+ if err != nil { + return nil, err + } + + resp, err := bs.client.Post(u, "", nil) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusCreated: + desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) + if err != nil { + return nil, err + } + return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} + case http.StatusAccepted: + // TODO(dmcgowan): Check for invalid UUID + uuid := resp.Header.Get("Docker-Upload-UUID") + location, err := sanitizeLocation(resp.Header.Get("Location"), u) + if err != nil { + return nil, err + } + + return &httpBlobUpload{ + statter: bs.statter, + client: bs.client, + uuid: uuid, + startedAt: time.Now(), + location: location, + }, nil + default: + return nil, HandleErrorResponse(resp) + } +} + +func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { + panic("not implemented") +} + +func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { + return bs.statter.Clear(ctx, dgst) +} + +type blobStatter struct { + name reference.Named + ub *v2.URLBuilder + client *http.Client +} + +func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return distribution.Descriptor{}, err + } + u, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return distribution.Descriptor{}, err + } + + resp, err := bs.client.Head(u) + if err != nil { + return distribution.Descriptor{}, err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + lengthHeader := resp.Header.Get("Content-Length") + if lengthHeader == "" { + return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) + } + + length, err := strconv.ParseInt(lengthHeader, 10, 64) + if err != nil { + return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) + } + + return distribution.Descriptor{ + MediaType: resp.Header.Get("Content-Type"), + Size: length, + Digest: dgst, + }, nil + } else if resp.StatusCode == http.StatusNotFound { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + return distribution.Descriptor{}, HandleErrorResponse(resp) +} + +func buildCatalogValues(maxEntries int, last string) url.Values { + values := url.Values{} + + if maxEntries > 0 { + values.Add("n", strconv.Itoa(maxEntries)) + } + + if last != "" { + values.Add("last", last) + } + + return values +} + +func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + ref, err := reference.WithDigest(bs.name, dgst) + if err != nil { + return err + } + blobURL, err := bs.ub.BuildBlobURL(ref) + if err != nil { + return err + } + + req, err := http.NewRequest("DELETE", blobURL, nil) + if err != nil { + return err + } + + resp, err := bs.client.Do(req) + if err != nil { + return err + } + defer resp.Body.Close() + + if SuccessStatus(resp.StatusCode) { + return nil + } + return HandleErrorResponse(resp) +} + +func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go new file mode 100644 index 0000000000..1d0b382fb5 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/transport/http_reader.go @@ -0,0 +1,250 
@@
+package transport
+
+import (
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"regexp"
+	"strconv"
+)
+
+var (
+	contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`)
+
+	// ErrWrongCodeForByteRange is returned if the client sends a request
+	// with a Range header but the server returns a 2xx or 3xx code other
+	// than 206 Partial Content.
+	ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request")
+)
+
+// ReadSeekCloser combines io.ReadSeeker with io.Closer.
+type ReadSeekCloser interface {
+	io.ReadSeeker
+	io.Closer
+}
+
+// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET
+// request. When seeking and starting a read from a non-zero offset
+// a "Range" header will be added which sets the offset.
+// TODO(dmcgowan): Move this into a separate utility package
+func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser {
+	return &httpReadSeeker{
+		client:       client,
+		url:          url,
+		errorHandler: errorHandler,
+	}
+}
+
+type httpReadSeeker struct {
+	client *http.Client
+	url    string
+
+	// errorHandler creates an error from an unsuccessful HTTP response.
+	// This allows the error to be created with the HTTP response body
+	// without leaking the body through a returned error.
+	errorHandler func(*http.Response) error
+
+	size int64
+
+	// rc is the remote read closer.
+	rc io.ReadCloser
+	// readerOffset tracks the offset as of the last read.
+	readerOffset int64
+	// seekOffset allows Seek to override the offset. Seek changes
+	// seekOffset instead of changing readerOffset directly so that
+	// connection resets can be delayed and possibly avoided if the
+	// seek is undone (i.e. seeking to the end and then back to the
+	// beginning).
+	seekOffset int64
+	err        error
+}
+
+func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	// If we sought to a different position, we need to reset the
+	// connection. This logic is here instead of Seek so that if
+	// a seek is undone before the next read, the connection doesn't
+	// need to be closed and reopened. A common example of this is
+	// seeking to the end to determine the length, and then seeking
+	// back to the original position.
+	if hrs.readerOffset != hrs.seekOffset {
+		hrs.reset()
+	}
+
+	hrs.readerOffset = hrs.seekOffset
+
+	rd, err := hrs.reader()
+	if err != nil {
+		return 0, err
+	}
+
+	n, err = rd.Read(p)
+	hrs.seekOffset += int64(n)
+	hrs.readerOffset += int64(n)
+
+	return n, err
+}
+
+func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
+	if hrs.err != nil {
+		return 0, hrs.err
+	}
+
+	lastReaderOffset := hrs.readerOffset
+
+	if whence == io.SeekStart && hrs.rc == nil {
+		// If no request has been made yet, and we are seeking to an
+		// absolute position, set the read offset as well to avoid an
+		// unnecessary request.
+ hrs.readerOffset = offset + } + + _, err := hrs.reader() + if err != nil { + hrs.readerOffset = lastReaderOffset + return 0, err + } + + newOffset := hrs.seekOffset + + switch whence { + case io.SeekCurrent: + newOffset += offset + case io.SeekEnd: + if hrs.size < 0 { + return 0, errors.New("content length not known") + } + newOffset = hrs.size + offset + case io.SeekStart: + newOffset = offset + } + + if newOffset < 0 { + err = errors.New("cannot seek to negative position") + } else { + hrs.seekOffset = newOffset + } + + return hrs.seekOffset, err +} + +func (hrs *httpReadSeeker) Close() error { + if hrs.err != nil { + return hrs.err + } + + // close and release reader chain + if hrs.rc != nil { + hrs.rc.Close() + } + + hrs.rc = nil + + hrs.err = errors.New("httpLayer: closed") + + return nil +} + +func (hrs *httpReadSeeker) reset() { + if hrs.err != nil { + return + } + if hrs.rc != nil { + hrs.rc.Close() + hrs.rc = nil + } +} + +func (hrs *httpReadSeeker) reader() (io.Reader, error) { + if hrs.err != nil { + return nil, hrs.err + } + + if hrs.rc != nil { + return hrs.rc, nil + } + + req, err := http.NewRequest("GET", hrs.url, nil) + if err != nil { + return nil, err + } + + if hrs.readerOffset > 0 { + // If we are at different offset, issue a range request from there. + req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) + // TODO: get context in here + // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) + } + + req.Header.Add("Accept-Encoding", "identity") + resp, err := hrs.client.Do(req) + if err != nil { + return nil, err + } + + // Normally would use client.SuccessStatus, but that would be a cyclic + // import + if resp.StatusCode >= 200 && resp.StatusCode <= 399 { + if hrs.readerOffset > 0 { + if resp.StatusCode != http.StatusPartialContent { + return nil, ErrWrongCodeForByteRange + } + + contentRange := resp.Header.Get("Content-Range") + if contentRange == "" { + return nil, errors.New("no Content-Range header found in HTTP 206 response") + } + + submatches := contentRangeRegexp.FindStringSubmatch(contentRange) + if len(submatches) < 4 { + return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) + } + + startByte, err := strconv.ParseUint(submatches[1], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) + } + + if startByte != uint64(hrs.readerOffset) { + return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) + } + + endByte, err := strconv.ParseUint(submatches[2], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) + } + + if submatches[3] == "*" { + hrs.size = -1 + } else { + size, err := strconv.ParseUint(submatches[3], 10, 64) + if err != nil { + return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) + } + + if endByte+1 != size { + return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) + } + + hrs.size = int64(size) + } + } else if resp.StatusCode == http.StatusOK { + hrs.size = resp.ContentLength + } else { + hrs.size = -1 + } + hrs.rc = resp.Body + } else { + defer resp.Body.Close() + if hrs.errorHandler != nil { + return nil, hrs.errorHandler(resp) + } + return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) + } + + return hrs.rc, nil +} diff --git 
a/vendor/github.com/docker/distribution/registry/client/transport/transport.go b/vendor/github.com/docker/distribution/registry/client/transport/transport.go new file mode 100644 index 0000000000..30e45fab0f --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/client/transport/transport.go @@ -0,0 +1,147 @@ +package transport + +import ( + "io" + "net/http" + "sync" +) + +// RequestModifier represents an object which will do an inplace +// modification of an HTTP request. +type RequestModifier interface { + ModifyRequest(*http.Request) error +} + +type headerModifier http.Header + +// NewHeaderRequestModifier returns a new RequestModifier which will +// add the given headers to a request. +func NewHeaderRequestModifier(header http.Header) RequestModifier { + return headerModifier(header) +} + +func (h headerModifier) ModifyRequest(req *http.Request) error { + for k, s := range http.Header(h) { + req.Header[k] = append(req.Header[k], s...) + } + + return nil +} + +// NewTransport creates a new transport which will apply modifiers to +// the request on a RoundTrip call. +func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { + return &transport{ + Modifiers: modifiers, + Base: base, + } +} + +// transport is an http.RoundTripper that makes HTTP requests after +// copying and modifying the request +type transport struct { + Modifiers []RequestModifier + Base http.RoundTripper + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// RoundTrip authorizes and authenticates the request with an +// access token. If no token exists or token is expired, +// tries to refresh/fetch a new token. +func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { + req2 := cloneRequest(req) + for _, modifier := range t.Modifiers { + if err := modifier.ModifyRequest(req2); err != nil { + return nil, err + } + } + + t.setModReq(req, req2) + res, err := t.base().RoundTrip(req2) + if err != nil { + t.setModReq(req, nil) + return nil, err + } + res.Body = &onEOFReader{ + rc: res.Body, + fn: func() { t.setModReq(req, nil) }, + } + return res, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (t *transport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := t.base().(canceler); ok { + t.mu.Lock() + modReq := t.modReq[req] + delete(t.modReq, req) + t.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func (t *transport) base() http.RoundTripper { + if t.Base != nil { + return t.Base + } + return http.DefaultTransport +} + +func (t *transport) setModReq(orig, mod *http.Request) { + t.mu.Lock() + defer t.mu.Unlock() + if t.modReq == nil { + t.modReq = make(map[*http.Request]*http.Request) + } + if mod == nil { + delete(t.modReq, orig) + } else { + t.modReq[orig] = mod + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) 
+ } + + return r2 +} + +type onEOFReader struct { + rc io.ReadCloser + fn func() +} + +func (r *onEOFReader) Read(p []byte) (n int, err error) { + n, err = r.rc.Read(p) + if err == io.EOF { + r.runFunc() + } + return +} + +func (r *onEOFReader) Close() error { + err := r.rc.Close() + r.runFunc() + return err +} + +func (r *onEOFReader) runFunc() { + if fn := r.fn; fn != nil { + fn() + r.fn = nil + } +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go new file mode 100644 index 0000000000..10a3909197 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cache.go @@ -0,0 +1,35 @@ +// Package cache provides facilities to speed up access to the storage +// backend. +package cache + +import ( + "fmt" + + "github.com/docker/distribution" +) + +// BlobDescriptorCacheProvider provides repository scoped +// BlobDescriptorService cache instances and a global descriptor cache. +type BlobDescriptorCacheProvider interface { + distribution.BlobDescriptorService + + RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) +} + +// ValidateDescriptor provides a helper function to ensure that caches have +// common criteria for admitting descriptors. +func ValidateDescriptor(desc distribution.Descriptor) error { + if err := desc.Digest.Validate(); err != nil { + return err + } + + if desc.Size < 0 { + return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) + } + + if desc.MediaType == "" { + return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) + } + + return nil +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go new file mode 100644 index 0000000000..ac4c452117 --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go @@ -0,0 +1,129 @@ +package cache + +import ( + "context" + + "github.com/docker/distribution" + prometheus "github.com/docker/distribution/metrics" + "github.com/opencontainers/go-digest" +) + +// Metrics is used to hold metric counters +// related to the number of times a cache was +// hit or missed. +type Metrics struct { + Requests uint64 + Hits uint64 + Misses uint64 +} + +// Logger can be provided on the MetricsTracker to log errors. +// +// Usually, this is just a proxy to dcontext.GetLogger. +type Logger interface { + Errorf(format string, args ...interface{}) +} + +// MetricsTracker represents a metric tracker +// which simply counts the number of hits and misses. +type MetricsTracker interface { + Hit() + Miss() + Metrics() Metrics + Logger(context.Context) Logger +} + +type cachedBlobStatter struct { + cache distribution.BlobDescriptorService + backend distribution.BlobDescriptorService + tracker MetricsTracker +} + +var ( + // cacheCount is the number of total cache request received/hits/misses + cacheCount = prometheus.StorageNamespace.NewLabeledCounter("cache", "The number of cache request received", "type") +) + +// NewCachedBlobStatter creates a new statter which prefers a cache and +// falls back to a backend. 
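+// +// Editor's sketch, not upstream code ("provider" and "backendStatter" are assumed to be a BlobDescriptorCacheProvider and a backend BlobDescriptorService already in scope); after the first lookup, repeated Stat calls for the same digest are served from the cache: +// +// statter := NewCachedBlobStatter(provider, backendStatter) +// desc, err := statter.Stat(ctx, dgst)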
+func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + } +} + +// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and +// falls back to a backend. Hits and misses will be sent to the tracker. +func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { + return &cachedBlobStatter{ + cache: cache, + backend: backend, + tracker: tracker, + } +} + +func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + cacheCount.WithValues("Request").Inc(1) + desc, err := cbds.cache.Stat(ctx, dgst) + if err != nil { + if err != distribution.ErrBlobUnknown { + logErrorf(ctx, cbds.tracker, "error retrieving descriptor from cache: %v", err) + } + + goto fallback + } + cacheCount.WithValues("Hit").Inc(1) + if cbds.tracker != nil { + cbds.tracker.Hit() + } + return desc, nil +fallback: + cacheCount.WithValues("Miss").Inc(1) + if cbds.tracker != nil { + cbds.tracker.Miss() + } + desc, err = cbds.backend.Stat(ctx, dgst) + if err != nil { + return desc, err + } + + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) + } + + return desc, err +} + +func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { + err := cbds.cache.Clear(ctx, dgst) + if err != nil { + return err + } + + err = cbds.backend.Clear(ctx, dgst) + if err != nil { + return err + } + return nil +} + +func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { + logErrorf(ctx, cbds.tracker, "error adding descriptor %v to cache: %v", desc.Digest, err) + } + return nil +} + +func logErrorf(ctx context.Context, tracker MetricsTracker, format string, args ...interface{}) { + if tracker == nil { + return + } + + logger := tracker.Logger(ctx) + if logger == nil { + return + } + logger.Errorf(format, args...) +} diff --git a/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go new file mode 100644 index 0000000000..42d94d9bde --- /dev/null +++ b/vendor/github.com/docker/distribution/registry/storage/cache/memory/memory.go @@ -0,0 +1,179 @@ +package memory + +import ( + "context" + "sync" + + "github.com/docker/distribution" + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/storage/cache" + "github.com/opencontainers/go-digest" +) + +type inMemoryBlobDescriptorCacheProvider struct { + global *mapBlobDescriptorCache + repositories map[string]*mapBlobDescriptorCache + mu sync.RWMutex +} + +// NewInMemoryBlobDescriptorCacheProvider returns a new map-based cache for +// storing blob descriptor data.
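+// +// Editor's sketch, not upstream code ("ctx", "dgst" and "desc" are assumed to be in scope): the provider answers global lookups itself and hands out per-repository views through RepositoryScoped: +// +// provider := NewInMemoryBlobDescriptorCacheProvider() +// repoCache, err := provider.RepositoryScoped("library/alpine") +// if err == nil { +// _ = repoCache.SetDescriptor(ctx, dgst, desc) +// }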
+func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { + return &inMemoryBlobDescriptorCacheProvider{ + global: newMapBlobDescriptorCache(), + repositories: make(map[string]*mapBlobDescriptorCache), + } +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { + if _, err := reference.ParseNormalizedNamed(repo); err != nil { + return nil, err + } + + imbdcp.mu.RLock() + defer imbdcp.mu.RUnlock() + + return &repositoryScopedInMemoryBlobDescriptorCache{ + repo: repo, + parent: imbdcp, + repository: imbdcp.repositories[repo], + }, nil +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + return imbdcp.global.Stat(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { + return imbdcp.global.Clear(ctx, dgst) +} + +func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + _, err := imbdcp.Stat(ctx, dgst) + if err == distribution.ErrBlobUnknown { + + if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { + // if the digests differ, set the other canonical mapping + if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { + return err + } + } + + // unknown, just set it + return imbdcp.global.SetDescriptor(ctx, dgst, desc) + } + + // we already know it, do nothing + return err +} + +// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped +// repository cache. Instances are not thread-safe but the delegated +// operations are. +type repositoryScopedInMemoryBlobDescriptorCache struct { + repo string + parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map + repository *mapBlobDescriptorCache +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return repo.Stat(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + rsimbdcp.parent.mu.Unlock() + + if repo == nil { + return distribution.ErrBlobUnknown + } + + return repo.Clear(ctx, dgst) +} + +func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + rsimbdcp.parent.mu.Lock() + repo := rsimbdcp.repository + if repo == nil { + // allocate map since we are setting it now. + var ok bool + // have to read back value since we may have allocated elsewhere. + repo, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] + if !ok { + repo = newMapBlobDescriptorCache() + rsimbdcp.parent.repositories[rsimbdcp.repo] = repo + } + rsimbdcp.repository = repo + } + rsimbdcp.parent.mu.Unlock() + + if err := repo.SetDescriptor(ctx, dgst, desc); err != nil { + return err + } + + return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) +} + +// mapBlobDescriptorCache provides a simple map-based implementation of the +// descriptor cache. 
+type mapBlobDescriptorCache struct { + descriptors map[digest.Digest]distribution.Descriptor + mu sync.RWMutex +} + +var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} + +func newMapBlobDescriptorCache() *mapBlobDescriptorCache { + return &mapBlobDescriptorCache{ + descriptors: make(map[digest.Digest]distribution.Descriptor), + } +} + +func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { + if err := dgst.Validate(); err != nil { + return distribution.Descriptor{}, err + } + + mbdc.mu.RLock() + defer mbdc.mu.RUnlock() + + desc, ok := mbdc.descriptors[dgst] + if !ok { + return distribution.Descriptor{}, distribution.ErrBlobUnknown + } + + return desc, nil +} + +func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + delete(mbdc.descriptors, dgst) + return nil +} + +func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { + if err := dgst.Validate(); err != nil { + return err + } + + if err := cache.ValidateDescriptor(desc); err != nil { + return err + } + + mbdc.mu.Lock() + defer mbdc.mu.Unlock() + + mbdc.descriptors[dgst] = desc + return nil +} diff --git a/vendor/github.com/docker/distribution/tags.go b/vendor/github.com/docker/distribution/tags.go new file mode 100644 index 0000000000..f22df2b850 --- /dev/null +++ b/vendor/github.com/docker/distribution/tags.go @@ -0,0 +1,27 @@ +package distribution + +import ( + "context" +) + +// TagService provides access to information about tagged objects. +type TagService interface { + // Get retrieves the descriptor identified by the tag. Some + // implementations may differentiate between "trusted" tags and + // "untrusted" tags. If a tag is "untrusted", the mapping will be returned + // as an ErrTagUntrusted error, with the target descriptor. + Get(ctx context.Context, tag string) (Descriptor, error) + + // Tag associates the tag with the provided descriptor, updating the + // current association, if needed. + Tag(ctx context.Context, tag string, desc Descriptor) error + + // Untag removes the given tag association + Untag(ctx context.Context, tag string) error + + // All returns the set of tags managed by this tag service + All(ctx context.Context) ([]string, error) + + // Lookup returns the set of tags referencing the given digest. 
+ Lookup(ctx context.Context, digest Descriptor) ([]string, error) +} diff --git a/vendor/github.com/docker/distribution/vendor.conf b/vendor/github.com/docker/distribution/vendor.conf new file mode 100644 index 0000000000..a249caf269 --- /dev/null +++ b/vendor/github.com/docker/distribution/vendor.conf @@ -0,0 +1,51 @@ +github.com/Azure/azure-sdk-for-go 4650843026a7fdec254a8d9cf893693a254edd0b +github.com/Azure/go-autorest eaa7994b2278094c904d31993d26f56324db3052 +github.com/sirupsen/logrus 3d4380f53a34dcdc95f0c1db702615992b38d9a4 +github.com/aws/aws-sdk-go f831d5a0822a1ad72420ab18c6269bca1ddaf490 +github.com/bshuster-repo/logrus-logstash-hook d2c0ecc1836d91814e15e23bb5dc309c3ef51f4a +github.com/beorn7/perks 4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9 +github.com/bugsnag/bugsnag-go b1d153021fcd90ca3f080db36bec96dc690fb274 +github.com/bugsnag/osext 0dd3f918b21bec95ace9dc86c7e70266cfc5c702 +github.com/bugsnag/panicwrap e2c28503fcd0675329da73bf48b33404db873782 +github.com/denverdino/aliyungo afedced274aa9a7fcdd47ac97018f0f8db4e5de2 +github.com/dgrijalva/jwt-go a601269ab70c205d26370c16f7c81e9017c14e04 +github.com/docker/go-metrics 399ea8c73916000c64c2c76e8da00ca82f8387ab +github.com/docker/libtrust fa567046d9b14f6aa788882a950d69651d230b21 +github.com/garyburd/redigo 535138d7bcd717d6531c701ef5933d98b1866257 +github.com/go-ini/ini 2ba15ac2dc9cdf88c110ec2dc0ced7fa45f5678c +github.com/golang/protobuf 8d92cf5fc15a4382f8964b08e1f42a75c0591aa3 +github.com/gorilla/handlers 60c7bfde3e33c201519a200a4507a158cc03a17b +github.com/gorilla/mux 599cba5e7b6137d46ddf58fb1765f5d928e69604 +github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 +github.com/jmespath/go-jmespath bd40a432e4c76585ef6b72d3fd96fb9b6dc7b68d +github.com/marstr/guid 8bd9a64bf37eb297b492a4101fb28e80ac0b290f +github.com/satori/go.uuid f58768cc1a7a7e77a3bd49e98cdd21419399b6a3 +github.com/matttproud/golang_protobuf_extensions c12348ce28de40eed0136aa2b644d0ee0650e56c +github.com/miekg/dns 271c58e0c14f552178ea321a545ff9af38930f39 +github.com/mitchellh/mapstructure 482a9fd5fa83e8c4e7817413b80f3eb8feec03ef +github.com/ncw/swift a0320860b16212c2b59b4912bb6508cda1d7cee6 +github.com/prometheus/client_golang c332b6f63c0658a65eca15c0e5247ded801cf564 +github.com/prometheus/client_model 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c +github.com/prometheus/common 89604d197083d4781071d3c65855d24ecfb0a563 +github.com/prometheus/procfs cb4147076ac75738c9a7d279075a253c0cc5acbd +github.com/Shopify/logrus-bugsnag 577dee27f20dd8f1a529f82210094af593be12bd +github.com/spf13/cobra 312092086bed4968099259622145a0c9ae280064 +github.com/spf13/pflag 5644820622454e71517561946e3d94b9f9db6842 +github.com/xenolf/lego a9d8cec0e6563575e5868a005359ac97911b5985 +github.com/yvasiyarov/go-metrics 57bccd1ccd43f94bb17fdd8bf3007059b802f85e +github.com/yvasiyarov/gorelic a9bba5b9ab508a086f9a12b8c51fab68478e2128 +github.com/yvasiyarov/newrelic_platform_go b21fdbd4370f3717f3bbd2bf41c223bc273068e6 +golang.org/x/crypto c10c31b5e94b6f7a0283272dc2bb27163dcea24b +golang.org/x/net 4876518f9e71663000c348837735820161a42df7 +golang.org/x/oauth2 045497edb6234273d67dbc25da3f2ddbc4c4cacf +golang.org/x/time a4bde12657593d5e90d0533a3e4fd95e635124cb +google.golang.org/api 
9bf6e6e569ff057f75d9604a46c52928f17d2b54 +google.golang.org/appengine 12d5545dc1cfa6047a286d5e853841b6471f4c19 +google.golang.org/cloud 975617b05ea8a58727e6c1a06b6161ff4185a9f2 +google.golang.org/grpc d3ddb4469d5a1b949fc7a7da7c1d6a0d1b6de994 +gopkg.in/check.v1 64131543e7896d5bcc6bd5a76287eb75ea96c673 +gopkg.in/square/go-jose.v1 40d457b439244b546f023d056628e5184136899b +gopkg.in/yaml.v2 v2.2.1 +rsc.io/letsencrypt e770c10b0f1a64775ae91d240407ce00d1a5bdeb https://github.com/dmcgowan/letsencrypt.git +github.com/opencontainers/go-digest a6d0ee40d4207ea02364bd3b9e8e77b9159ba1eb +github.com/opencontainers/image-spec ab7389ef9f50030c9b245bc16b981c7ddf192882 diff --git a/vendor/github.com/docker/docker-credential-helpers/LICENSE b/vendor/github.com/docker/docker-credential-helpers/LICENSE new file mode 100644 index 0000000000..1ea555e2af --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2016 David Calavera + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/docker/docker-credential-helpers/client/client.go b/vendor/github.com/docker/docker-credential-helpers/client/client.go new file mode 100644 index 0000000000..d1d0434cb5 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/client.go @@ -0,0 +1,121 @@ +package client + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" + + "github.com/docker/docker-credential-helpers/credentials" +) + +// isValidCredsMessage checks whether 'msg' contains an invalid-credentials error message. +// It returns nil if the message contains no such error, and the matching error otherwise. +// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername. +func isValidCredsMessage(msg string) error { + if credentials.IsCredentialsMissingServerURLMessage(msg) { + return credentials.NewErrCredentialsMissingServerURL() + } + + if credentials.IsCredentialsMissingUsernameMessage(msg) { + return credentials.NewErrCredentialsMissingUsername() + } + + return nil +} + +// Store uses an external program to save credentials.
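+// +// Editor's sketch, not upstream code; the helper binary name "docker-credential-pass" is an assumption: +// +// p := NewShellProgramFunc("docker-credential-pass") +// err := Store(p, &credentials.Credentials{ +// ServerURL: "https://index.docker.io/v1/", +// Username: "user", +// Secret: "s3cr3t", +// })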
+func Store(program ProgramFunc, creds *credentials.Credentials) error { + cmd := program("store") + + buffer := new(bytes.Buffer) + if err := json.NewEncoder(buffer).Encode(creds); err != nil { + return err + } + cmd.Input(buffer) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// Get executes an external program to get the credentials from a native store. +func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) { + cmd := program("get") + cmd.Input(strings.NewReader(serverURL)) + + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if credentials.IsErrCredentialsNotFoundMessage(t) { + return nil, credentials.NewErrCredentialsNotFound() + } + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t) + } + + resp := &credentials.Credentials{ + ServerURL: serverURL, + } + + if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil { + return nil, err + } + + return resp, nil +} + +// Erase executes a program to remove the server credentials from the native store. +func Erase(program ProgramFunc, serverURL string) error { + cmd := program("erase") + cmd.Input(strings.NewReader(serverURL)) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t) + } + + return nil +} + +// List executes a program to list server credentials in the native store. +func List(program ProgramFunc) (map[string]string, error) { + cmd := program("list") + cmd.Input(strings.NewReader("unused")) + out, err := cmd.Output() + if err != nil { + t := strings.TrimSpace(string(out)) + + if isValidErr := isValidCredsMessage(t); isValidErr != nil { + err = isValidErr + } + + return nil, fmt.Errorf("error listing credentials - err: %v, out: `%s`", err, t) + } + + var resp map[string]string + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&resp); err != nil { + return nil, err + } + + return resp, nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/client/command.go b/vendor/github.com/docker/docker-credential-helpers/client/command.go new file mode 100644 index 0000000000..0183c06393 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/client/command.go @@ -0,0 +1,57 @@ +package client + +import ( + "fmt" + "io" + "os" + + exec "golang.org/x/sys/execabs" +) + +// Program is an interface to execute external programs. +type Program interface { + Output() ([]byte, error) + Input(in io.Reader) +} + +// ProgramFunc is a type of function that initializes programs based on arguments. +type ProgramFunc func(args ...string) Program + +// NewShellProgramFunc creates programs that are executed in a Shell. 
+func NewShellProgramFunc(name string) ProgramFunc { + return NewShellProgramFuncWithEnv(name, nil) +} + +// NewShellProgramFuncWithEnv creates programs that are executed in a Shell with environment variables. +func NewShellProgramFuncWithEnv(name string, env *map[string]string) ProgramFunc { + return func(args ...string) Program { + return &Shell{cmd: createProgramCmdRedirectErr(name, args, env)} + } +} + +func createProgramCmdRedirectErr(commandName string, args []string, env *map[string]string) *exec.Cmd { + programCmd := exec.Command(commandName, args...) + programCmd.Env = os.Environ() + if env != nil { + for k, v := range *env { + programCmd.Env = append(programCmd.Env, fmt.Sprintf("%s=%s", k, v)) + } + } + programCmd.Stderr = os.Stderr + return programCmd +} + +// Shell invokes shell commands to talk with a remote credentials helper. +type Shell struct { + cmd *exec.Cmd +} + +// Output returns responses from the remote credentials helper. +func (s *Shell) Output() ([]byte, error) { + return s.cmd.Output() +} + +// Input sets the input to send to a remote credentials helper. +func (s *Shell) Input(in io.Reader) { + s.cmd.Stdin = in +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go new file mode 100644 index 0000000000..da8b594e7f --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/credentials.go @@ -0,0 +1,186 @@ +package credentials + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "strings" +) + +// Credentials holds the information shared between docker and the credentials store. +type Credentials struct { + ServerURL string + Username string + Secret string +} + +// isValid checks the integrity of a Credentials object, ensuring that it lacks +// neither a server URL nor a username. +// It returns whether the credentials are valid and the error if they aren't. +// error values can be errCredentialsMissingServerURL or errCredentialsMissingUsername +func (c *Credentials) isValid() (bool, error) { + if len(c.ServerURL) == 0 { + return false, NewErrCredentialsMissingServerURL() + } + + if len(c.Username) == 0 { + return false, NewErrCredentialsMissingUsername() + } + + return true, nil +} + +// CredsLabel holds the label under which Docker credentials are stored in credential stores that allow labelling. +// That label makes it possible to filter out non-Docker credentials at lookup/search time in the macOS keychain, +// the Windows credential manager and Linux libsecret. The default value is "Docker Credentials". +var CredsLabel = "Docker Credentials" + +// SetCredsLabel is a simple setter for CredsLabel +func SetCredsLabel(label string) { + CredsLabel = label +} + +// Serve initializes the credentials helper and parses the action argument. +// This function is designed to be called from a command line interface. +// It uses os.Args[1] as the key for the action. +// It uses os.Stdin as input and os.Stdout as output. +// This function terminates the program with os.Exit(1) if there is an error. +func Serve(helper Helper) { + var err error + if len(os.Args) != 2 { + err = fmt.Errorf("Usage: %s <store|get|erase|list|version>", os.Args[0]) + } + + if err == nil { + err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) + } + + if err != nil { + fmt.Fprintf(os.Stdout, "%v\n", err) + os.Exit(1) + } +} + +// HandleCommand uses a helper and a key to run a credential action.
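+// +// Editor's sketch, not upstream code ("myHelper" is a hypothetical Helper implementation): the Serve entry point above reduces to a call of the form: +// +// err := HandleCommand(myHelper, os.Args[1], os.Stdin, os.Stdout)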
+func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { + switch key { + case "store": + return Store(helper, in) + case "get": + return Get(helper, in, out) + case "erase": + return Erase(helper, in) + case "list": + return List(helper, out) + case "version": + return PrintVersion(out) + } + return fmt.Errorf("Unknown credential action `%s`", key) +} + +// Store uses a helper and an input reader to save credentials. +// The reader must contain the JSON serialization of a Credentials struct. +func Store(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + var creds Credentials + if err := json.NewDecoder(buffer).Decode(&creds); err != nil { + return err + } + + if ok, err := creds.isValid(); !ok { + return err + } + + return helper.Add(&creds) +} + +// Get retrieves the credentials for a given server URL. +// The reader must contain the server URL to search. +// The writer is used to write the JSON serialization of the credentials. +func Get(helper Helper, reader io.Reader, writer io.Writer) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + username, secret, err := helper.Get(serverURL) + if err != nil { + return err + } + + resp := Credentials{ + ServerURL: serverURL, + Username: username, + Secret: secret, + } + + buffer.Reset() + if err := json.NewEncoder(buffer).Encode(resp); err != nil { + return err + } + + fmt.Fprint(writer, buffer.String()) + return nil +} + +// Erase removes credentials from the store. +// The reader must contain the server URL to remove. +func Erase(helper Helper, reader io.Reader) error { + scanner := bufio.NewScanner(reader) + + buffer := new(bytes.Buffer) + for scanner.Scan() { + buffer.Write(scanner.Bytes()) + } + + if err := scanner.Err(); err != nil && err != io.EOF { + return err + } + + serverURL := strings.TrimSpace(buffer.String()) + if len(serverURL) == 0 { + return NewErrCredentialsMissingServerURL() + } + + return helper.Delete(serverURL) +} + +// List returns all the serverURLs of keys in +// the OS store as a list of strings. +func List(helper Helper, writer io.Writer) error { + accts, err := helper.List() + if err != nil { + return err + } + return json.NewEncoder(writer).Encode(accts) +} + +// PrintVersion outputs the current version. +func PrintVersion(writer io.Writer) error { + fmt.Fprintln(writer, Version) + return nil +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go new file mode 100644 index 0000000000..fe6a5aef45 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/error.go @@ -0,0 +1,102 @@ +package credentials + +const ( + // ErrCredentialsNotFound standardizes the not found error, so every helper returns + // the same message and docker can handle it properly.
+ errCredentialsNotFoundMessage = "credentials not found in native keychain" + + // ErrCredentialsMissingServerURL and ErrCredentialsMissingUsername standardize + // invalid credentials or credentials management operations + errCredentialsMissingServerURLMessage = "no credentials server URL" + errCredentialsMissingUsernameMessage = "no credentials username" +) + +// errCredentialsNotFound represents an error +// raised when credentials are not in the store. +type errCredentialsNotFound struct{} + +// Error returns the standard error message +// for when the credentials are not in the store. +func (errCredentialsNotFound) Error() string { + return errCredentialsNotFoundMessage +} + +// NewErrCredentialsNotFound creates a new error +// for when the credentials are not in the store. +func NewErrCredentialsNotFound() error { + return errCredentialsNotFound{} +} + +// IsErrCredentialsNotFound returns true if the error +// was caused by not having a set of credentials in a store. +func IsErrCredentialsNotFound(err error) bool { + _, ok := err.(errCredentialsNotFound) + return ok +} + +// IsErrCredentialsNotFoundMessage returns true if the error +// was caused by not having a set of credentials in a store. +// +// This function helps to check messages returned by an +// external program via its standard output. +func IsErrCredentialsNotFoundMessage(err string) bool { + return err == errCredentialsNotFoundMessage +} + +// errCredentialsMissingServerURL represents an error raised +// when the credentials object has no server URL or when no +// server URL is provided to a credentials operation requiring +// one. +type errCredentialsMissingServerURL struct{} + +func (errCredentialsMissingServerURL) Error() string { + return errCredentialsMissingServerURLMessage +} + +// errCredentialsMissingUsername represents an error raised +// when the credentials object has no username or when no +// username is provided to a credentials operation requiring +// one. +type errCredentialsMissingUsername struct{} + +func (errCredentialsMissingUsername) Error() string { + return errCredentialsMissingUsernameMessage +} + +// NewErrCredentialsMissingServerURL creates a new error for +// errCredentialsMissingServerURL. +func NewErrCredentialsMissingServerURL() error { + return errCredentialsMissingServerURL{} +} + +// NewErrCredentialsMissingUsername creates a new error for +// errCredentialsMissingUsername. +func NewErrCredentialsMissingUsername() error { + return errCredentialsMissingUsername{} +} + +// IsCredentialsMissingServerURL returns true if the error +// was an errCredentialsMissingServerURL. +func IsCredentialsMissingServerURL(err error) bool { + _, ok := err.(errCredentialsMissingServerURL) + return ok +} + +// IsCredentialsMissingServerURLMessage checks for an +// errCredentialsMissingServerURL in the error message. +func IsCredentialsMissingServerURLMessage(err string) bool { + return err == errCredentialsMissingServerURLMessage +} + +// IsCredentialsMissingUsername returns true if the error +// was an errCredentialsMissingUsername. +func IsCredentialsMissingUsername(err error) bool { + _, ok := err.(errCredentialsMissingUsername) + return ok +} + +// IsCredentialsMissingUsernameMessage checks for an +// errCredentialsMissingUsername in the error message. 
+func IsCredentialsMissingUsernameMessage(err string) bool { + return err == errCredentialsMissingUsernameMessage +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go new file mode 100644 index 0000000000..135acd254d --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/helper.go @@ -0,0 +1,14 @@ +package credentials + +// Helper is the interface a credentials store helper must implement. +type Helper interface { + // Add appends credentials to the store. + Add(*Credentials) error + // Delete removes credentials from the store. + Delete(serverURL string) error + // Get retrieves credentials from the store. + // It returns username and secret as strings. + Get(serverURL string) (string, string, error) + // List returns the stored serverURLs and their associated usernames. + List() (map[string]string, error) +} diff --git a/vendor/github.com/docker/docker-credential-helpers/credentials/version.go b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go new file mode 100644 index 0000000000..185e367961 --- /dev/null +++ b/vendor/github.com/docker/docker-credential-helpers/credentials/version.go @@ -0,0 +1,4 @@ +package credentials + +// Version holds a string describing the current version +const Version = "0.6.4" diff --git a/vendor/github.com/docker/docker/AUTHORS b/vendor/github.com/docker/docker/AUTHORS new file mode 100644 index 0000000000..dffacff112 --- /dev/null +++ b/vendor/github.com/docker/docker/AUTHORS @@ -0,0 +1,2175 @@ +# This file lists all individuals having contributed content to the repository. +# For how it is generated, see `hack/generate-authors.sh`. + +Aanand Prasad +Aaron Davidson +Aaron Feng +Aaron Hnatiw +Aaron Huslage +Aaron L. 
Xu +Aaron Lehmann +Aaron Welch +Aaron.L.Xu +Abel Muiño +Abhijeet Kasurde +Abhinandan Prativadi +Abhinav Ajgaonkar +Abhishek Chanda +Abhishek Sharma +Abin Shahab +Adam Avilla +Adam Dobrawy +Adam Eijdenberg +Adam Kunk +Adam Miller +Adam Mills +Adam Pointer +Adam Singer +Adam Walz +Addam Hardy +Aditi Rajagopal +Aditya +Adnan Khan +Adolfo Ochagavía +Adria Casas +Adrian Moisey +Adrian Mouat +Adrian Oprea +Adrien Folie +Adrien Gallouët +Ahmed Kamal +Ahmet Alp Balkan +Aidan Feldman +Aidan Hobson Sayers +AJ Bowen +Ajey Charantimath +ajneu +Akash Gupta +Akhil Mohan +Akihiro Matsushima +Akihiro Suda +Akim Demaille +Akira Koyasu +Akshay Karle +Al Tobey +alambike +Alan Hoyle +Alan Scherger +Alan Thompson +Albert Callarisa +Albert Zhang +Albin Kerouanton +Alejandro González Hevia +Aleksa Sarai +Aleksandrs Fadins +Alena Prokharchyk +Alessandro Boch +Alessio Biancalana +Alex Chan +Alex Chen +Alex Coventry +Alex Crawford +Alex Ellis +Alex Gaynor +Alex Goodman +Alex Olshansky +Alex Samorukov +Alex Warhawk +Alexander Artemenko +Alexander Boyd +Alexander Larsson +Alexander Midlash +Alexander Morozov +Alexander Shopov +Alexandre Beslic +Alexandre Garnier +Alexandre González +Alexandre Jomin +Alexandru Sfirlogea +Alexei Margasov +Alexey Guskov +Alexey Kotlyarov +Alexey Shamrin +Alexis THOMAS +Alfred Landrum +Ali Dehghani +Alicia Lauerman +Alihan Demir +Allen Madsen +Allen Sun +almoehi +Alvaro Saurin +Alvin Deng +Alvin Richards +amangoel +Amen Belayneh +Amir Goldstein +Amit Bakshi +Amit Krishnan +Amit Shukla +Amr Gawish +Amy Lindburg +Anand Patil +AnandkumarPatel +Anatoly Borodin +Anca Iordache +Anchal Agrawal +Anda Xu +Anders Janmyr +Andre Dublin <81dublin@gmail.com> +Andre Granovsky +Andrea Denisse Gómez +Andrea Luzzardi +Andrea Turli +Andreas Elvers +Andreas Köhler +Andreas Savvides +Andreas Tiefenthaler +Andrei Gherzan +Andrei Vagin +Andrew C. Bodine +Andrew Clay Shafer +Andrew Duckworth +Andrew France +Andrew Gerrand +Andrew Guenther +Andrew He +Andrew Hsu +Andrew Kuklewicz +Andrew Macgregor +Andrew Macpherson +Andrew Martin +Andrew McDonnell +Andrew Munsell +Andrew Pennebaker +Andrew Po +Andrew Weiss +Andrew Williams +Andrews Medina +Andrey Kolomentsev +Andrey Petrov +Andrey Stolbovsky +André Martins +andy +Andy Chambers +andy diller +Andy Goldstein +Andy Kipp +Andy Rothfusz +Andy Smith +Andy Wilson +Anes Hasicic +Anil Belur +Anil Madhavapeddy +Ankit Jain +Ankush Agarwal +Anonmily +Anran Qiao +Anshul Pundir +Anthon van der Neut +Anthony Baire +Anthony Bishopric +Anthony Dahanne +Anthony Sottile +Anton Löfgren +Anton Nikitin +Anton Polonskiy +Anton Tiurin +Antonio Murdaca +Antonis Kalipetis +Antony Messerli +Anuj Bahuguna +Anusha Ragunathan +apocas +Arash Deshmeh +ArikaChen +Arko Dasgupta +Arnaud Lefebvre +Arnaud Porterie +Arnaud Rebillout +Arthur Barr +Arthur Gautier +Artur Meyster +Arun Gupta +Asad Saeeduddin +Asbjørn Enge +averagehuman +Avi Das +Avi Kivity +Avi Miller +Avi Vaid +ayoshitake +Azat Khuyiyakhmetov +Bardia Keyoumarsi +Barnaby Gray +Barry Allard +Bartłomiej Piotrowski +Bastiaan Bakker +bdevloed +Ben Bonnefoy +Ben Firshman +Ben Golub +Ben Gould +Ben Hall +Ben Sargent +Ben Severson +Ben Toews +Ben Wiklund +Benjamin Atkin +Benjamin Baker +Benjamin Boudreau +Benjamin Yolken +Benny Ng +Benoit Chesneau +Bernerd Schaefer +Bernhard M. 
Wiedemann +Bert Goethals +Bertrand Roussel +Bevisy Zhang +Bharath Thiruveedula +Bhiraj Butala +Bhumika Bayani +Bilal Amarni +Bill Wang +Bily Zhang +Bin Liu +Bingshen Wang +Blake Geno +Boaz Shuster +bobby abbott +Boqin Qin +Boris Pruessmann +Boshi Lian +Bouke Haarsma +Boyd Hemphill +boynux +Bradley Cicenas +Bradley Wright +Brandon Liu +Brandon Philips +Brandon Rhodes +Brendan Dixon +Brent Salisbury +Brett Higgins +Brett Kochendorfer +Brett Randall +Brian (bex) Exelbierd +Brian Bland +Brian DeHamer +Brian Dorsey +Brian Flad +Brian Goff +Brian McCallister +Brian Olsen +Brian Schwind +Brian Shumate +Brian Torres-Gil +Brian Trump +Brice Jaglin +Briehan Lombaard +Brielle Broder +Bruno Bigras +Bruno Binet +Bruno Gazzera +Bruno Renié +Bruno Tavares +Bryan Bess +Bryan Boreham +Bryan Matsuo +Bryan Murphy +Burke Libbey +Byung Kang +Caleb Spare +Calen Pennington +Cameron Boehmer +Cameron Spear +Campbell Allen +Candid Dauth +Cao Weiwei +Carl Henrik Lunde +Carl Loa Odin +Carl X. Su +Carlo Mion +Carlos Alexandro Becker +Carlos de Paula +Carlos Sanchez +Carol Fager-Higgins +Cary +Casey Bisson +Catalin Pirvu +Ce Gao +Cedric Davies +Cezar Sa Espinola +Chad Swenson +Chance Zibolski +Chander Govindarajan +Chanhun Jeong +Chao Wang +Charles Chan +Charles Hooper +Charles Law +Charles Lindsay +Charles Merriam +Charles Sarrazin +Charles Smith +Charlie Drage +Charlie Lewis +Chase Bolt +ChaYoung You +Chen Chao +Chen Chuanliang +Chen Hanxiao +Chen Min +Chen Mingjie +Chen Qiu +Cheng-mean Liu +Chengfei Shang +Chengguang Xu +chenyuzhu +Chetan Birajdar +Chewey +Chia-liang Kao +chli +Cholerae Hu +Chris Alfonso +Chris Armstrong +Chris Dias +Chris Dituri +Chris Fordham +Chris Gavin +Chris Gibson +Chris Khoo +Chris McKinnel +Chris McKinnel +Chris Price +Chris Seto +Chris Snow +Chris St. Pierre +Chris Stivers +Chris Swan +Chris Telfer +Chris Wahl +Chris Weyl +Chris White +Christian Berendt +Christian Brauner +Christian Böhme +Christian Muehlhaeuser +Christian Persson +Christian Rotzoll +Christian Simon +Christian Stefanescu +Christophe Mehay +Christophe Troestler +Christophe Vidal +Christopher Biscardi +Christopher Crone +Christopher Currie +Christopher Jones +Christopher Latham +Christopher Rigor +Christy Norman +Chun Chen +Ciro S. 
Costa +Clayton Coleman +Clinton Kitson +Cody Roseborough +Coenraad Loubser +Colin Dunklau +Colin Hebert +Colin Panisset +Colin Rice +Colin Walters +Collin Guarino +Colm Hally +companycy +Corbin Coleman +Corey Farrell +Cory Forsyth +cressie176 +CrimsonGlory +Cristian Ariza +Cristian Staretu +cristiano balducci +Cristina Yenyxe Gonzalez Garcia +Cruceru Calin-Cristian +CUI Wei +Cyprian Gracz +Cyril F +Daan van Berkel +Daehyeok Mun +Dafydd Crosby +dalanlan +Damian Smyth +Damien Nadé +Damien Nozay +Damjan Georgievski +Dan Anolik +Dan Buch +Dan Cotora +Dan Feldman +Dan Griffin +Dan Hirsch +Dan Keder +Dan Levy +Dan McPherson +Dan Stine +Dan Williams +Dani Hodovic +Dani Louca +Daniel Antlinger +Daniel Black +Daniel Dao +Daniel Exner +Daniel Farrell +Daniel Garcia +Daniel Gasienica +Daniel Grunwell +Daniel Helfand +Daniel Hiltgen +Daniel J Walsh +Daniel Menet +Daniel Mizyrycki +Daniel Nephin +Daniel Norberg +Daniel Nordberg +Daniel Robinson +Daniel S +Daniel Sweet +Daniel Von Fange +Daniel Watkins +Daniel X Moore +Daniel YC Lin +Daniel Zhang +Danny Berger +Danny Milosavljevic +Danny Yates +Danyal Khaliq +Darren Coxall +Darren Shepherd +Darren Stahl +Dattatraya Kumbhar +Davanum Srinivas +Dave Barboza +Dave Goodchild +Dave Henderson +Dave MacDonald +Dave Tucker +David Anderson +David Calavera +David Chung +David Corking +David Cramer +David Currie +David Davis +David Dooling +David Gageot +David Gebler +David Glasser +David Lawrence +David Lechner +David M. Karr +David Mackey +David Mat +David Mcanulty +David McKay +David P Hilton +David Pelaez +David R. Jenni +David Röthlisberger +David Sheets +David Sissitka +David Trott +David Wang <00107082@163.com> +David Williamson +David Xia +David Young +Davide Ceretti +Dawn Chen +dbdd +dcylabs +Debayan De +Deborah Gertrude Digges +deed02392 +Deep Debroy +Deng Guangxing +Deni Bertovic +Denis Defreyne +Denis Gladkikh +Denis Ollier +Dennis Chen +Dennis Chen +Dennis Docter +Derek +Derek +Derek Ch +Derek McGowan +Deric Crago +Deshi Xiao +devmeyster +Devon Estes +Devvyn Murphy +Dharmit Shah +Dhawal Yogesh Bhanushali +Diego Romero +Diego Siqueira +Dieter Reuter +Dillon Dixon +Dima Stopel +Dimitri John Ledkov +Dimitris Mandalidis +Dimitris Rozakis +Dimitry Andric +Dinesh Subhraveti +Ding Fei +Diogo Monica +DiuDiugirl +Djibril Koné +dkumor +Dmitri Logvinenko +Dmitri Shuralyov +Dmitry Demeshchuk +Dmitry Gusev +Dmitry Kononenko +Dmitry Sharshakov +Dmitry Shyshkin +Dmitry Smirnov +Dmitry V. Krivenok +Dmitry Vorobev +Dolph Mathews +Dominic Tubach +Dominic Yin +Dominik Dingel +Dominik Finkbeiner +Dominik Honnef +Don Kirkby +Don Kjer +Don Spaulding +Donald Huang +Dong Chen +Donghwa Kim +Donovan Jones +Doron Podoleanu +Doug Davis +Doug MacEachern +Doug Tangren +Douglas Curtis +Dr Nic Williams +dragon788 +Dražen Lučanin +Drew Erny +Drew Hubl +Dustin Sallings +Ed Costello +Edmund Wagner +Eiichi Tsukata +Eike Herzbach +Eivin Giske Skaaren +Eivind Uggedal +Elan Ruusamäe +Elango Sivanandam +Elena Morozova +Eli Uriegas +Elias Faxö +Elias Probst +Elijah Zupancic +eluck +Elvir Kuric +Emil Davtyan +Emil Hernvall +Emily Maier +Emily Rose +Emir Ozer +Enguerran +Eohyung Lee +epeterso +Eric Barch +Eric Curtin +Eric G. Noriega +Eric Hanchrow +Eric Lee +Eric Myhre +Eric Paris +Eric Rafaloff +Eric Rosenberg +Eric Sage +Eric Soderstrom +Eric Yang +Eric-Olivier Lamey +Erica Windisch +Erik Bray +Erik Dubbelboer +Erik Hollensbe +Erik Inge Bolsø +Erik Kristensen +Erik St. 
Martin +Erik Weathers +Erno Hopearuoho +Erwin van der Koogh +Ethan Bell +Ethan Mosbaugh +Euan Kemp +Eugen Krizo +Eugene Yakubovich +Evan Allrich +Evan Carmi +Evan Hazlett +Evan Krall +Evan Phoenix +Evan Wies +Evelyn Xu +Everett Toews +Evgeniy Makhrov +Evgeny Shmarnev +Evgeny Vereshchagin +Ewa Czechowska +Eystein Måløy Stenberg +ezbercih +Ezra Silvera +Fabian Kramm +Fabian Lauer +Fabian Raetz +Fabiano Rosas +Fabio Falci +Fabio Kung +Fabio Rapposelli +Fabio Rehm +Fabrizio Regini +Fabrizio Soppelsa +Faiz Khan +falmp +Fangming Fang +Fangyuan Gao <21551127@zju.edu.cn> +fanjiyun +Fareed Dudhia +Fathi Boudra +Federico Gimenez +Felipe Oliveira +Felipe Ruhland +Felix Abecassis +Felix Geisendörfer +Felix Hupfeld +Felix Rabe +Felix Ruess +Felix Schindler +Feng Yan +Fengtu Wang +Ferenc Szabo +Fernando +Fero Volar +Ferran Rodenas +Filipe Brandenburger +Filipe Oliveira +Flavio Castelli +Flavio Crisciani +Florian +Florian Klein +Florian Maier +Florian Noeding +Florian Schmaus +Florian Weingarten +Florin Asavoaie +Florin Patan +fonglh +Foysal Iqbal +Francesc Campoy +Francesco Mari +Francis Chuang +Francisco Carriedo +Francisco Souza +Frank Groeneveld +Frank Herrmann +Frank Macreery +Frank Rosquin +frankyang +Fred Lifton +Frederick F. Kautz IV +Frederik Loeffert +Frederik Nordahl Jul Sabroe +Freek Kalter +Frieder Bluemle +Fu JinLin +Félix Baylac-Jacqué +Félix Cantournet +Gabe Rosenhouse +Gabor Nagy +Gabriel Linder +Gabriel Monroy +Gabriel Nicolas Avellaneda +Gaetan de Villele +Galen Sampson +Gang Qiao +Gareth Rushgrove +Garrett Barboza +Gary Schaetz +Gaurav +Gaurav Singh +Gaël PORTAY +Genki Takiuchi +GennadySpb +Geoffrey Bachelet +Geon Kim +George Kontridze +George MacRorie +George Xie +Georgi Hristozov +Gereon Frey +German DZ +Gert van Valkenhoef +Gerwim Feiken +Ghislain Bourgeois +Giampaolo Mancini +Gianluca Borello +Gildas Cuisinier +Giovan Isa Musthofa +gissehel +Giuseppe Mazzotta +Gleb Fotengauer-Malinovskiy +Gleb M Borisov +Glyn Normington +GoBella +Goffert van Gool +Goldwyn Rodrigues +Gopikannan Venugopalsamy +Gosuke Miyashita +Gou Rao +Govinda Fichtner +Grant Millar +Grant Reaber +Graydon Hoare +Greg Fausak +Greg Pflaum +Greg Stephens +Greg Thornton +Grzegorz Jaśkiewicz +Guilhem Lettron +Guilherme Salgado +Guillaume Dufour +Guillaume J. Charmes +guoxiuyan +Guri +Gurjeet Singh +Guruprasad +Gustav Sinder +gwx296173 +Günter Zöchbauer +Haichao Yang +haikuoliu +Hakan Özler +Hamish Hutchings +Hannes Ljungberg +Hans Kristian Flaatten +Hans Rødtang +Hao Shu Wei +Hao Zhang <21521210@zju.edu.cn> +Harald Albers +Harald Niesche +Harley Laue +Harold Cooper +Harrison Turton +Harry Zhang +Harshal Patil +Harshal Patil +He Simei +He Xiaoxi +He Xin +heartlock <21521209@zju.edu.cn> +Hector Castro +Helen Xie +Henning Sprang +Hiroshi Hatake +Hiroyuki Sasagawa +Hobofan +Hollie Teal +Hong Xu +Hongbin Lu +Hongxu Jia +Honza Pokorny +Hsing-Hui Hsu +hsinko <21551195@zju.edu.cn> +Hu Keping +Hu Tao +HuanHuan Ye +Huanzhong Zhang +Huayi Zhang +Hugo Duncan +Hugo Marisco <0x6875676f@gmail.com> +Hunter Blanks +huqun +Huu Nguyen +hyeongkyu.lee +Hyzhou Zhy +Iago López Galeiras +Ian Babrou +Ian Bishop +Ian Bull +Ian Calvert +Ian Campbell +Ian Chen +Ian Lee +Ian Main +Ian Philpot +Ian Truslove +Iavael +Icaro Seara +Ignacio Capurro +Igor Dolzhikov +Igor Karpovich +Iliana Weller +Ilkka Laukkanen +Ilya Dmitrichenko +Ilya Gusev +Ilya Khlopotov +imre Fitos +inglesp +Ingo Gottwald +Innovimax +Isaac Dupree +Isabel Jimenez +Isaiah Grace +Isao Jonas +Iskander Sharipov +Ivan Babrou +Ivan Fraixedes +Ivan Grcic +Ivan Markin +J Bruni +J. 
Nunn +Jack Danger Canty +Jack Laxson +Jacob Atzen +Jacob Edelman +Jacob Tomlinson +Jacob Vallejo +Jacob Wen +Jaime Cepeda +Jaivish Kothari +Jake Champlin +Jake Moshenko +Jake Sanders +jakedt +James Allen +James Carey +James Carr +James DeFelice +James Harrison Fisher +James Kyburz +James Kyle +James Lal +James Mills +James Nesbitt +James Nugent +James Turnbull +James Watkins-Harvey +Jamie Hannaford +Jamshid Afshar +Jan Chren +Jan Keromnes +Jan Koprowski +Jan Pazdziora +Jan Toebes +Jan-Gerd Tenberge +Jan-Jaap Driessen +Jana Radhakrishnan +Jannick Fahlbusch +Januar Wayong +Jared Biel +Jared Hocutt +Jaroslaw Zabiello +jaseg +Jasmine Hegman +Jason A. Donenfeld +Jason Divock +Jason Giedymin +Jason Green +Jason Hall +Jason Heiss +Jason Livesay +Jason McVetta +Jason Plum +Jason Shepherd +Jason Smith +Jason Sommer +Jason Stangroome +jaxgeller +Jay +Jay +Jay Kamat +Jean Rouge +Jean-Baptiste Barth +Jean-Baptiste Dalido +Jean-Christophe Berthon +Jean-Paul Calderone +Jean-Pierre Huynh +Jean-Tiare Le Bigot +Jeeva S. Chelladhurai +Jeff Anderson +Jeff Hajewski +Jeff Johnston +Jeff Lindsay +Jeff Mickey +Jeff Minard +Jeff Nickoloff +Jeff Silberman +Jeff Welch +Jeffrey Bolle +Jeffrey Morgan +Jeffrey van Gogh +Jenny Gebske +Jeremy Chambers +Jeremy Grosser +Jeremy Price +Jeremy Qian +Jeremy Unruh +Jeremy Yallop +Jeroen Franse +Jeroen Jacobs +Jesse Dearing +Jesse Dubay +Jessica Frazelle +Jezeniel Zapanta +Jhon Honce +Ji.Zhilong +Jian Liao +Jian Zhang +Jiang Jinyang +Jie Luo +Jie Ma +Jihyun Hwang +Jilles Oldenbeuving +Jim Alateras +Jim Ehrismann +Jim Galasyn +Jim Minter +Jim Perrin +Jimmy Cuadra +Jimmy Puckett +Jimmy Song +Jinsoo Park +Jintao Zhang +Jiri Appl +Jiri Popelka +Jiuyue Ma +Jiří Župka +Joao Fernandes +Joao Trindade +Joe Beda +Joe Doliner +Joe Ferguson +Joe Gordon +Joe Shaw +Joe Van Dyk +Joel Friedly +Joel Handwell +Joel Hansson +Joel Wurtz +Joey Geiger +Joey Geiger +Joey Gibson +Joffrey F +Johan Euphrosine +Johan Rydberg +Johanan Lieberman +Johannes 'fish' Ziemke +John Costa +John Feminella +John Gardiner Myers +John Gossman +John Harris +John Howard +John Laswell +John Maguire +John Mulhausen +John OBrien III +John Starks +John Stephens +John Tims +John V. Martinez +John Warwick +John Willis +Jon Johnson +Jon Surrell +Jon Wedaman +Jonas Dohse +Jonas Heinrich +Jonas Pfenniger +Jonathan A. Schweder +Jonathan A. Sternberg +Jonathan Boulle +Jonathan Camp +Jonathan Choy +Jonathan Dowland +Jonathan Lebon +Jonathan Lomas +Jonathan McCrohan +Jonathan Mueller +Jonathan Pares +Jonathan Rudenberg +Jonathan Stoppani +Jonh Wendell +Joni Sar +Joost Cassee +Jordan Arentsen +Jordan Jennings +Jordan Sissel +Jorge Marin +Jorit Kleine-Möllhoff +Jose Diaz-Gonzalez +Joseph Anthony Pasquale Holsten +Joseph Hager +Joseph Kern +Joseph Rothrock +Josh +Josh Bodah +Josh Bonczkowski +Josh Chorlton +Josh Eveleth +Josh Hawn +Josh Horwitz +Josh Poimboeuf +Josh Soref +Josh Wilson +Josiah Kiehl +José Tomás Albornoz +Joyce Jang +JP +Julian Taylor +Julien Barbier +Julien Bisconti +Julien Bordellier +Julien Dubois +Julien Kassar +Julien Maitrehenry +Julien Pervillé +Julien Pivotto +Julio Guerra +Julio Montes +Jun-Ru Chang +Jussi Nummelin +Justas Brazauskas +Justen Martin +Justin Cormack +Justin Force +Justin Menga +Justin Plock +Justin Simonelis +Justin Terry +Justyn Temme +Jyrki Puttonen +Jérémy Leherpeur +Jérôme Petazzoni +Jörg Thalheim +K. 
Heller +Kai Blin +Kai Qiang Wu (Kennan) +Kamil Domański +Kamjar Gerami +Kanstantsin Shautsou +Kara Alexandra +Karan Lyons +Kareem Khazem +kargakis +Karl Grzeszczak +Karol Duleba +Karthik Karanth +Karthik Nayak +Kasper Fabæch Brandt +Kate Heddleston +Katie McLaughlin +Kato Kazuyoshi +Katrina Owen +Kawsar Saiyeed +Kay Yan +kayrus +Kazuhiro Sera +Ke Li +Ke Xu +Kei Ohmura +Keith Hudgins +Keli Hu +Ken Cochrane +Ken Herner +Ken ICHIKAWA +Ken Reese +Kenfe-Mickaël Laventure +Kenjiro Nakayama +Kent Johnson +Kenta Tada +Kevin "qwazerty" Houdebert +Kevin Burke +Kevin Clark +Kevin Feyrer +Kevin J. Lynagh +Kevin Jing Qiu +Kevin Kern +Kevin Menard +Kevin Meredith +Kevin P. Kucharczyk +Kevin Parsons +Kevin Richardson +Kevin Shi +Kevin Wallace +Kevin Yap +Keyvan Fatehi +kies +Kim BKC Carlbacker +Kim Eik +Kimbro Staken +Kir Kolyshkin +Kiran Gangadharan +Kirill SIbirev +knappe +Kohei Tsuruta +Koichi Shiraishi +Konrad Kleine +Konstantin Gribov +Konstantin L +Konstantin Pelykh +Krasi Georgiev +Krasimir Georgiev +Kris-Mikael Krister +Kristian Haugene +Kristina Zabunova +Krystian Wojcicki +Kun Zhang +Kunal Kushwaha +Kunal Tyagi +Kyle Conroy +Kyle Linden +Kyle Wuolle +kyu +Lachlan Coote +Lai Jiangshan +Lajos Papp +Lakshan Perera +Lalatendu Mohanty +Lance Chen +Lance Kinley +Lars Butler +Lars Kellogg-Stedman +Lars R. Damerow +Lars-Magnus Skog +Laszlo Meszaros +Laura Frank +Laurent Erignoux +Laurie Voss +Leandro Siqueira +Lee Chao <932819864@qq.com> +Lee, Meng-Han +leeplay +Lei Gong +Lei Jitang +Len Weincier +Lennie +Leo Gallucci +Leszek Kowalski +Levi Blackstone +Levi Gross +Lewis Daly +Lewis Marshall +Lewis Peckover +Li Yi +Liam Macgillavry +Liana Lo +Liang Mingqiang +Liang-Chi Hsieh +Liao Qingwei +Lifubang +Lihua Tang +Lily Guo +limsy +Lin Lu +LingFaKe +Linus Heckemann +Liran Tal +Liron Levin +Liu Bo +Liu Hua +liwenqi +lixiaobing10051267 +Liz Zhang +LIZAO LI +Lizzie Dixon <_@lizzie.io> +Lloyd Dewolf +Lokesh Mandvekar +longliqiang88 <394564827@qq.com> +Lorenz Leutgeb +Lorenzo Fontana +Lotus Fenn +Louis Delossantos +Louis Opter +Luca Favatella +Luca Marturana +Luca Orlandi +Luca-Bogdan Grigorescu +Lucas Chan +Lucas Chi +Lucas Molas +Lucas Silvestre +Luciano Mores +Luis Martínez de Bartolomé Izquierdo +Luiz Svoboda +Lukas Heeren +Lukas Waslowski +lukaspustina +Lukasz Zajaczkowski +Luke Marsden +Lyn +Lynda O'Leary +Lénaïc Huard +Ma Müller +Ma Shimiao +Mabin +Madhan Raj Mookkandy +Madhav Puri +Madhu Venugopal +Mageee +Mahesh Tiyyagura +malnick +Malte Janduda +Manfred Touron +Manfred Zabarauskas +Manjunath A Kumatagi +Mansi Nahar +Manuel Meurer +Manuel Rüger +Manuel Woelker +mapk0y +Marc Abramowitz +Marc Kuo +Marc Tamsky +Marcel Edmund Franke +Marcelo Horacio Fortino +Marcelo Salazar +Marco Hennings +Marcus Cobden +Marcus Farkas +Marcus Linke +Marcus Martins +Marcus Ramberg +Marek Goldmann +Marian Marinov +Marianna Tessel +Mario Loriedo +Marius Gundersen +Marius Sturm +Marius Voila +Mark Allen +Mark Jeromin +Mark McGranaghan +Mark McKinstry +Mark Milstein +Mark Oates +Mark Parker +Mark West +Markan Patel +Marko Mikulicic +Marko Tibold +Markus Fix +Markus Kortlang +Martijn Dwars +Martijn van Oosterhout +Martin Honermeyer +Martin Kelly +Martin Mosegaard Amdisen +Martin Muzatko +Martin Redmond +Mary Anthony +Masahito Zembutsu +Masato Ohba +Masayuki Morita +Mason Malone +Mateusz Sulima +Mathias Monnerville +Mathieu Champlon +Mathieu Le Marec - Pasquet +Mathieu Parent +Matt Apperson +Matt Bachmann +Matt Bentley +Matt Haggard +Matt Hoyle +Matt McCormick +Matt Moore +Matt Richardson +Matt Rickard +Matt Robenolt +Matt 
Schurenko +Matt Williams +Matthew Heon +Matthew Lapworth +Matthew Mayer +Matthew Mosesohn +Matthew Mueller +Matthew Riley +Matthias Klumpp +Matthias Kühnle +Matthias Rampke +Matthieu Hauglustaine +Mattias Jernberg +Mauricio Garavaglia +mauriyouth +Max Harmathy +Max Shytikov +Maxim Fedchyshyn +Maxim Ivanov +Maxim Kulkin +Maxim Treskin +Maxime Petazzoni +Maximiliano Maccanti +Maxwell +Meaglith Ma +meejah +Megan Kostick +Mehul Kar +Mei ChunTao +Mengdi Gao +Mert Yazıcıoğlu +mgniu +Micah Zoltu +Michael A. Smith +Michael Bridgen +Michael Brown +Michael Chiang +Michael Crosby +Michael Currie +Michael Friis +Michael Gorsuch +Michael Grauer +Michael Holzheu +Michael Hudson-Doyle +Michael Huettermann +Michael Irwin +Michael Käufl +Michael Neale +Michael Nussbaum +Michael Prokop +Michael Scharf +Michael Spetsiotis +Michael Stapelberg +Michael Steinert +Michael Thies +Michael West +Michael Zhao +Michal Fojtik +Michal Gebauer +Michal Jemala +Michal Minář +Michal Wieczorek +Michaël Pailloncy +Michał Czeraszkiewicz +Michał Gryko +Michiel de Jong +Mickaël Fortunato +Mickaël Remars +Miguel Angel Fernández +Miguel Morales +Mihai Borobocea +Mihuleacc Sergiu +Mike Brown +Mike Bush +Mike Casas +Mike Chelen +Mike Danese +Mike Dillon +Mike Dougherty +Mike Estes +Mike Gaffney +Mike Goelzer +Mike Leone +Mike Lundy +Mike MacCana +Mike Naberezny +Mike Snitzer +mikelinjie <294893458@qq.com> +Mikhail Sobolev +Miklos Szegedi +Milind Chawre +Miloslav Trmač +mingqing +Mingzhen Feng +Misty Stanley-Jones +Mitch Capper +Mizuki Urushida +mlarcher +Mohammad Banikazemi +Mohammad Nasirifar +Mohammed Aaqib Ansari +Mohit Soni +Moorthy RS +Morgan Bauer +Morgante Pell +Morgy93 +Morten Siebuhr +Morton Fox +Moysés Borges +mrfly +Mrunal Patel +Muayyad Alsadi +Mustafa Akın +Muthukumar R +Máximo Cuadros +Médi-Rémi Hashim +Nace Oroz +Nahum Shalman +Nakul Pathak +Nalin Dahyabhai +Nan Monnand Deng +Naoki Orii +Natalie Parker +Natanael Copa +Natasha Jarus +Nate Brennand +Nate Eagleson +Nate Jones +Nathan Hsieh +Nathan Kleyn +Nathan LeClaire +Nathan McCauley +Nathan Williams +Naveed Jamil +Neal McBurnett +Neil Horman +Neil Peterson +Nelson Chen +Neyazul Haque +Nghia Tran +Niall O'Higgins +Nicholas E. Rabenau +Nick Adcock +Nick DeCoursin +Nick Irvine +Nick Neisen +Nick Parker +Nick Payne +Nick Russo +Nick Stenning +Nick Stinemates +NickrenREN +Nicola Kabar +Nicolas Borboën +Nicolas De Loof +Nicolas Dudebout +Nicolas Goy +Nicolas Kaiser +Nicolas Sterchele +Nicolas V Castet +Nicolás Hock Isaza +Nigel Poulton +Nik Nyby +Nikhil Chawla +NikolaMandic +Nikolas Garofil +Nikolay Edigaryev +Nikolay Milovanov +Nirmal Mehta +Nishant Totla +NIWA Hideyuki +Noah Meyerhans +Noah Treuhaft +NobodyOnSE +noducks +Nolan Darilek +Noriki Nakamura +nponeccop +Nuutti Kotivuori +nzwsch +O.S. Tezer +objectified +Odin Ugedal +Oguz Bilgic +Oh Jinkyun +Ohad Schneider +ohmystack +Ole Reifschneider +Oliver Neal +Oliver Reason +Olivier Gambier +Olle Jonsson +Olli Janatuinen +Olly Pomeroy +Omri Shiv +Oriol Francès +Oskar Niburski +Otto Kekäläinen +Ouyang Liduo +Ovidio Mallo +Panagiotis Moustafellos +Paolo G. 
Giarrusso +Pascal +Pascal Bach +Pascal Borreli +Pascal Hartig +Patrick Böänziger +Patrick Devine +Patrick Hemmer +Patrick Stapleton +Patrik Cyvoct +pattichen +Paul +paul +Paul Annesley +Paul Bellamy +Paul Bowsher +Paul Furtado +Paul Hammond +Paul Jimenez +Paul Kehrer +Paul Lietar +Paul Liljenberg +Paul Morie +Paul Nasrat +Paul Weaver +Paulo Ribeiro +Pavel Lobashov +Pavel Matěja +Pavel Pletenev +Pavel Pospisil +Pavel Sutyrin +Pavel Tikhomirov +Pavlos Ratis +Pavol Vargovcik +Pawel Konczalski +Peeyush Gupta +Peggy Li +Pei Su +Peng Tao +Penghan Wang +Per Weijnitz +perhapszzy@sina.com +Peter Bourgon +Peter Braden +Peter Bücker +Peter Choi +Peter Dave Hello +Peter Edge +Peter Ericson +Peter Esbensen +Peter Jaffe +Peter Kang +Peter Malmgren +Peter Salvatore +Peter Volpe +Peter Waller +Petr Švihlík +Phil +Phil Estes +Phil Spitler +Philip Alexander Etling +Philip Monroe +Philipp Gillé +Philipp Wahala +Philipp Weissensteiner +Phillip Alexander +phineas +pidster +Piergiuliano Bossi +Pierre +Pierre Carrier +Pierre Dal-Pra +Pierre Wacrenier +Pierre-Alain RIVIERE +Piotr Bogdan +pixelistik +Porjo +Poul Kjeldager Sørensen +Pradeep Chhetri +Pradip Dhara +Prasanna Gautam +Pratik Karki +Prayag Verma +Priya Wadhwa +Projjol Banerji +Przemek Hejman +Pure White +pysqz +Qiang Huang +Qinglan Peng +qudongfang +Quentin Brossard +Quentin Perez +Quentin Tayssier +r0n22 +Radostin Stoyanov +Rafal Jeczalik +Rafe Colton +Raghavendra K T +Raghuram Devarakonda +Raja Sami +Rajat Pandit +Rajdeep Dua +Ralf Sippl +Ralle +Ralph Bean +Ramkumar Ramachandra +Ramon Brooker +Ramon van Alteren +RaviTeja Pothana +Ray Tsang +ReadmeCritic +Recursive Madman +Reficul +Regan McCooey +Remi Rampin +Remy Suen +Renato Riccieri Santos Zannon +Renaud Gaubert +Rhys Hiltner +Ri Xu +Ricardo N Feliciano +Rich Moyse +Rich Seymour +Richard +Richard Burnison +Richard Harvey +Richard Mathie +Richard Metzler +Richard Scothern +Richo Healey +Rick Bradley +Rick van de Loo +Rick Wieman +Rik Nijessen +Riku Voipio +Riley Guerin +Ritesh H Shukla +Riyaz Faizullabhoy +Rob Gulewich +Rob Vesse +Robert Bachmann +Robert Bittle +Robert Obryk +Robert Schneider +Robert Stern +Robert Terhaar +Robert Wallis +Robert Wang +Roberto G. Hashioka +Roberto Muñoz Fernández +Robin Naundorf +Robin Schneider +Robin Speekenbrink +Robin Thoni +robpc +Rodolfo Carvalho +Rodrigo Vaz +Roel Van Nyen +Roger Peppe +Rohit Jnagal +Rohit Kadam +Rohit Kapur +Rojin George +Roland Huß +Roland Kammerer +Roland Moriz +Roma Sokolov +Roman Dudin +Roman Mazur +Roman Strashkin +Ron Smits +Ron Williams +Rong Gao +Rong Zhang +Rongxiang Song +root +root +root +root +Rory Hunter +Rory McCune +Ross Boucher +Rovanion Luckey +Royce Remer +Rozhnov Alexandr +Rudolph Gottesheim +Rui Cao +Rui Lopes +Ruilin Li +Runshen Zhu +Russ Magee +Ryan Abrams +Ryan Anderson +Ryan Aslett +Ryan Belgrave +Ryan Detzel +Ryan Fowler +Ryan Liu +Ryan McLaughlin +Ryan O'Donnell +Ryan Seto +Ryan Simmen +Ryan Stelly +Ryan Thomas +Ryan Trauntvein +Ryan Wallner +Ryan Zhang +ryancooper7 +RyanDeng +Ryo Nakao +Rémy Greinhofer +s. 
rannou +s00318865 +Sabin Basyal +Sachin Joshi +Sagar Hani +Sainath Grandhi +Sakeven Jiang +Salahuddin Khan +Sally O'Malley +Sam Abed +Sam Alba +Sam Bailey +Sam J Sharpe +Sam Neirinck +Sam Reis +Sam Rijs +Sam Whited +Sambuddha Basu +Sami Wagiaalla +Samuel Andaya +Samuel Dion-Girardeau +Samuel Karp +Samuel PHAN +Sandeep Bansal +Sankar சங்கர் +Sanket Saurav +Santhosh Manohar +sapphiredev +Sargun Dhillon +Sascha Andres +Sascha Grunert +SataQiu +Satnam Singh +Satoshi Amemiya +Satoshi Tagomori +Scott Bessler +Scott Collier +Scott Johnston +Scott Stamp +Scott Walls +sdreyesg +Sean Christopherson +Sean Cronin +Sean Lee +Sean McIntyre +Sean OMeara +Sean P. Kane +Sean Rodman +Sebastiaan van Steenis +Sebastiaan van Stijn +Senthil Kumar Selvaraj +Senthil Kumaran +SeongJae Park +Seongyeol Lim +Serge Hallyn +Sergey Alekseev +Sergey Evstifeev +Sergii Kabashniuk +Sergio Lopez +Serhat Gülçiçek +SeungUkLee +Sevki Hasirci +Shane Canon +Shane da Silva +Shaun Kaasten +shaunol +Shawn Landden +Shawn Siefkas +shawnhe +Shayne Wang +Shekhar Gulati +Sheng Yang +Shengbo Song +Shev Yan +Shih-Yuan Lee +Shijiang Wei +Shijun Qin +Shishir Mahajan +Shoubhik Bose +Shourya Sarcar +Shu-Wai Chow +shuai-z +Shukui Yang +Shuwei Hao +Sian Lerk Lau +Sidhartha Mani +sidharthamani +Silas Sewell +Silvan Jegen +Simão Reis +Simei He +Simon Barendse +Simon Eskildsen +Simon Ferquel +Simon Leinen +Simon Menke +Simon Taranto +Simon Vikstrom +Sindhu S +Sjoerd Langkemper +skanehira +Solganik Alexander +Solomon Hykes +Song Gao +Soshi Katsuta +Soulou +Spencer Brown +Spencer Smith +Sridatta Thatipamala +Sridhar Ratnakumar +Srini Brahmaroutu +Srinivasan Srivatsan +Staf Wagemakers +Stanislav Bondarenko +Stanislav Levin +Steeve Morin +Stefan Berger +Stefan J. Wernli +Stefan Praszalowicz +Stefan S. +Stefan Scherer +Stefan Staudenmeyer +Stefan Weil +Stephan Spindler +Stephen Benjamin +Stephen Crosby +Stephen Day +Stephen Drake +Stephen Rust +Steve Desmond +Steve Dougherty +Steve Durrheimer +Steve Francia +Steve Koch +Steven Burgess +Steven Erenst +Steven Hartland +Steven Iveson +Steven Merrill +Steven Richards +Steven Taylor +Stig Larsson +Subhajit Ghosh +Sujith Haridasan +Sun Gengze <690388648@qq.com> +Sun Jianbo +Sune Keller +Sunny Gogoi +Suryakumar Sudar +Sven Dowideit +Swapnil Daingade +Sylvain Baubeau +Sylvain Bellemare +Sébastien +Sébastien HOUZÉ +Sébastien Luttringer +Sébastien Stormacq +Tabakhase +Tadej Janež +TAGOMORI Satoshi +tang0th +Tangi Colin +Tatsuki Sugiura +Tatsushi Inagaki +Taylan Isikdemir +Taylor Jones +Ted M. Young +Tehmasp Chaudhri +Tejaswini Duggaraju +Tejesh Mehta +terryding77 <550147740@qq.com> +tgic +Thatcher Peskens +theadactyl +Thell 'Bo' Fowler +Thermionix +Thijs Terlouw +Thomas Bikeev +Thomas Frössman +Thomas Gazagnaire +Thomas Grainger +Thomas Hansen +Thomas Leonard +Thomas Léveil +Thomas Orozco +Thomas Riccardi +Thomas Schroeter +Thomas Sjögren +Thomas Swift +Thomas Tanaka +Thomas Texier +Ti Zhou +Tianon Gravi +Tianyi Wang +Tibor Vass +Tiffany Jernigan +Tiffany Low +Till Wegmüller +Tim +Tim Bart +Tim Bosse +Tim Dettrick +Tim Düsterhus +Tim Hockin +Tim Potter +Tim Ruffles +Tim Smith +Tim Terhorst +Tim Wang +Tim Waugh +Tim Wraight +Tim Zju <21651152@zju.edu.cn> +timfeirg +Timothy Hobbs +tjwebb123 +tobe +Tobias Bieniek +Tobias Bradtke +Tobias Gesellchen +Tobias Klauser +Tobias Munk +Tobias Schmidt +Tobias Schwab +Todd Crane +Todd Lunter +Todd Whiteman +Toli Kuznets +Tom Barlow +Tom Booth +Tom Denham +Tom Fotherby +Tom Howe +Tom Hulihan +Tom Maaswinkel +Tom Sweeney +Tom Wilkie +Tom X. 
Tobin +Tomas Tomecek +Tomasz Kopczynski +Tomasz Lipinski +Tomasz Nurkiewicz +Tommaso Visconti +Tomáš Hrčka +Tonny Xu +Tony Abboud +Tony Daws +Tony Miller +toogley +Torstein Husebø +Tõnis Tiigi +Trace Andreason +tracylihui <793912329@qq.com> +Trapier Marshall +Travis Cline +Travis Thieman +Trent Ogren +Trevor +Trevor Pounds +Trevor Sullivan +Trishna Guha +Tristan Carel +Troy Denton +Tycho Andersen +Tyler Brock +Tyler Brown +Tzu-Jung Lee +uhayate +Ulysse Carion +Umesh Yadav +Utz Bacher +vagrant +Vaidas Jablonskis +vanderliang +Velko Ivanov +Veres Lajos +Victor Algaze +Victor Coisne +Victor Costan +Victor I. Wood +Victor Lyuboslavsky +Victor Marmol +Victor Palma +Victor Vieux +Victoria Bialas +Vijaya Kumar K +Vikram bir Singh +Viktor Stanchev +Viktor Vojnovski +VinayRaghavanKS +Vincent Batts +Vincent Bernat +Vincent Boulineau +Vincent Demeester +Vincent Giersch +Vincent Mayers +Vincent Woo +Vinod Kulkarni +Vishal Doshi +Vishnu Kannan +Vitaly Ostrosablin +Vitor Monteiro +Vivek Agarwal +Vivek Dasgupta +Vivek Goyal +Vladimir Bulyga +Vladimir Kirillov +Vladimir Pouzanov +Vladimir Rutsky +Vladimir Varankin +VladimirAus +Vlastimil Zeman +Vojtech Vitek (V-Teq) +waitingkuo +Walter Leibbrandt +Walter Stanish +Wang Chao +Wang Guoliang +Wang Jie +Wang Long +Wang Ping +Wang Xing +Wang Yuexiao +Wang Yumu <37442693@qq.com> +wanghuaiqing +Ward Vandewege +WarheadsSE +Wassim Dhif +Wayne Chang +Wayne Song +Weerasak Chongnguluam +Wei Fu +Wei Wu +Wei-Ting Kuo +weipeng +weiyan +Weiyang Zhu +Wen Cheng Ma +Wendel Fleming +Wenjun Tang +Wenkai Yin +wenlxie +Wentao Zhang +Wenxuan Zhao +Wenyu You <21551128@zju.edu.cn> +Wenzhi Liang +Wes Morgan +Wewang Xiaorenfine +Wiktor Kwapisiewicz +Will Dietz +Will Rouesnel +Will Weaver +willhf +William Delanoue +William Henry +William Hubbs +William Martin +William Riancho +William Thurston +Wilson Júnior +Wing-Kam Wong +WiseTrem +Wolfgang Powisch +Wonjun Kim +xamyzhao +Xian Chaobo +Xianglin Gao +Xianlu Bird +Xiao YongBiao +XiaoBing Jiang +Xiaodong Liu +Xiaodong Zhang +Xiaoxi He +Xiaoxu Chen +Xiaoyu Zhang +xichengliudui <1693291525@qq.com> +xiekeyang +Ximo Guanter Gonzálbez +Xinbo Weng +Xinfeng Liu +Xinzi Zhou +Xiuming Chen +Xuecong Liao +xuzhaokui +Yadnyawalkya Tale +Yahya +YAMADA Tsuyoshi +Yamasaki Masahide +Yan Feng +Yang Bai +Yang Pengfei +yangchenliang +Yanqiang Miao +Yao Zaiyong +Yash Murty +Yassine Tijani +Yasunori Mahata +Yazhong Liu +Yestin Sun +Yi EungJun +Yibai Zhang +Yihang Ho +Ying Li +Yohei Ueda +Yong Tang +Yongxin Li +Yongzhi Pan +Yosef Fertel +You-Sheng Yang (楊有勝) +youcai +Youcef YEKHLEF +Yu Changchun +Yu Chengxia +Yu Peng +Yu-Ju Hong +Yuan Sun +Yuanhong Peng +Yue Zhang +Yuhao Fang +Yuichiro Kaneko +Yunxiang Huang +Yurii Rashkovskii +Yusuf Tarık Günaydın +Yves Junqueira +Zac Dover +Zach Borboa +Zachary Jaffee +Zain Memon +Zaiste! 
+Zane DeGraffenried +Zefan Li +Zen Lin(Zhinan Lin) +Zhang Kun +Zhang Wei +Zhang Wentao +ZhangHang +zhangxianwei +Zhenan Ye <21551168@zju.edu.cn> +zhenghenghuo +Zhenhai Gao +Zhenkun Bi +zhipengzuo +Zhou Hao +Zhoulin Xie +Zhu Guihua +Zhu Kunjia +Zhuoyun Wei +Ziheng Liu +Zilin Du +zimbatm +Ziming Dong +ZJUshuaizhou <21551191@zju.edu.cn> +zmarouf +Zoltan Tombol +Zou Yu +zqh +Zuhayr Elahi +Zunayed Ali +Álex González +Álvaro Lázaro +Átila Camurça Alves +尹吉峰 +屈骏 +徐俊杰 +慕陶 +搏通 +黄艳红00139573 diff --git a/vendor/github.com/docker/docker/LICENSE b/vendor/github.com/docker/docker/LICENSE new file mode 100644 index 0000000000..6d8d58fb67 --- /dev/null +++ b/vendor/github.com/docker/docker/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/docker/NOTICE b/vendor/github.com/docker/docker/NOTICE new file mode 100644 index 0000000000..58b19b6d15 --- /dev/null +++ b/vendor/github.com/docker/docker/NOTICE @@ -0,0 +1,19 @@ +Docker +Copyright 2012-2017 Docker, Inc. + +This product includes software developed at Docker, Inc. (https://www.docker.com). + +This product contains software (https://github.com/creack/pty) developed +by Keith Rarick, licensed under the MIT License. + +The following is courtesy of our legal counsel: + + +Use and transfer of Docker may be subject to certain restrictions by the +United States and other governments. +It is your responsibility to ensure that your use and/or transfer does not +violate applicable laws. + +For more information, please see https://www.bis.doc.gov + +See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/vendor/github.com/docker/docker/api/types/auth.go b/vendor/github.com/docker/docker/api/types/auth.go new file mode 100644 index 0000000000..ddf15bb182 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/auth.go @@ -0,0 +1,22 @@ +package types // import "github.com/docker/docker/api/types" + +// AuthConfig contains authorization information for connecting to a Registry +type AuthConfig struct { + Username string `json:"username,omitempty"` + Password string `json:"password,omitempty"` + Auth string `json:"auth,omitempty"` + + // Email is an optional value associated with the username. + // This field is deprecated and will be removed in a later + // version of docker. + Email string `json:"email,omitempty"` + + ServerAddress string `json:"serveraddress,omitempty"` + + // IdentityToken is used to authenticate the user and get + // an access token for the registry. 
+ IdentityToken string `json:"identitytoken,omitempty"` + + // RegistryToken is a bearer token to be sent to a registry + RegistryToken string `json:"registrytoken,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go new file mode 100644 index 0000000000..bf3463b90e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/blkiodev/blkio.go @@ -0,0 +1,23 @@ +package blkiodev // import "github.com/docker/docker/api/types/blkiodev" + +import "fmt" + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 +} + +func (w *WeightDevice) String() string { + return fmt.Sprintf("%s:%d", w.Path, w.Weight) +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate uint64 +} + +func (t *ThrottleDevice) String() string { + return fmt.Sprintf("%s:%d", t.Path, t.Rate) +} diff --git a/vendor/github.com/docker/docker/api/types/client.go b/vendor/github.com/docker/docker/api/types/client.go new file mode 100644 index 0000000000..9c464b73e2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/client.go @@ -0,0 +1,419 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "bufio" + "io" + "net" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + units "github.com/docker/go-units" +) + +// CheckpointCreateOptions holds parameters to create a checkpoint from a container +type CheckpointCreateOptions struct { + CheckpointID string + CheckpointDir string + Exit bool +} + +// CheckpointListOptions holds parameters to list checkpoints for a container +type CheckpointListOptions struct { + CheckpointDir string +} + +// CheckpointDeleteOptions holds parameters to delete a checkpoint from a container +type CheckpointDeleteOptions struct { + CheckpointID string + CheckpointDir string +} + +// ContainerAttachOptions holds parameters to attach to a container. +type ContainerAttachOptions struct { + Stream bool + Stdin bool + Stdout bool + Stderr bool + DetachKeys string + Logs bool +} + +// ContainerCommitOptions holds parameters to commit changes into a container. +type ContainerCommitOptions struct { + Reference string + Comment string + Author string + Changes []string + Pause bool + Config *container.Config +} + +// ContainerExecInspect holds information returned by exec inspect. +type ContainerExecInspect struct { + ExecID string `json:"ID"` + ContainerID string + Running bool + ExitCode int + Pid int +} + +// ContainerListOptions holds parameters to list containers with. +type ContainerListOptions struct { + Quiet bool + Size bool + All bool + Latest bool + Since string + Before string + Limit int + Filters filters.Args +} + +// ContainerLogsOptions holds parameters to filter logs with. +type ContainerLogsOptions struct { + ShowStdout bool + ShowStderr bool + Since string + Until string + Timestamps bool + Follow bool + Tail string + Details bool +} + +// ContainerRemoveOptions holds parameters to remove containers. +type ContainerRemoveOptions struct { + RemoveVolumes bool + RemoveLinks bool + Force bool +} + +// ContainerStartOptions holds parameters to start containers. 
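These option structs are plain parameter bags consumed by the corresponding methods on the Docker API client. As a point of reference, a minimal sketch of listing containers with ContainerListOptions — the client construction assumes the standard github.com/docker/docker/client package, and the error handling is illustrative only, not part of the vendored sources:

package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
)

func main() {
	// FromEnv honors DOCKER_HOST, DOCKER_API_VERSION, and related variables.
	cli, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		panic(err)
	}
	// All includes stopped containers; Limit caps the number of results.
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{
		All:   true,
		Limit: 10,
	})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID, c.Status)
	}
}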
+type ContainerStartOptions struct { + CheckpointID string + CheckpointDir string +} + +// CopyToContainerOptions holds information +// about files to copy into a container +type CopyToContainerOptions struct { + AllowOverwriteDirWithFile bool + CopyUIDGID bool +} + +// EventsOptions holds parameters to filter events with. +type EventsOptions struct { + Since string + Until string + Filters filters.Args +} + +// NetworkListOptions holds parameters to filter the list of networks with. +type NetworkListOptions struct { + Filters filters.Args +} + +// HijackedResponse holds connection information for a hijacked request. +type HijackedResponse struct { + Conn net.Conn + Reader *bufio.Reader +} + +// Close closes the hijacked connection and reader. +func (h *HijackedResponse) Close() { + h.Conn.Close() +} + +// CloseWriter is an interface implemented by structs +// that close input streams to prevent further writing. +type CloseWriter interface { + CloseWrite() error +} + +// CloseWrite closes a readWriter for writing. +func (h *HijackedResponse) CloseWrite() error { + if conn, ok := h.Conn.(CloseWriter); ok { + return conn.CloseWrite() + } + return nil +} + +// ImageBuildOptions holds the information +// necessary to build images. +type ImageBuildOptions struct { + Tags []string + SuppressOutput bool + RemoteContext string + NoCache bool + Remove bool + ForceRemove bool + PullParent bool + Isolation container.Isolation + CPUSetCPUs string + CPUSetMems string + CPUShares int64 + CPUQuota int64 + CPUPeriod int64 + Memory int64 + MemorySwap int64 + CgroupParent string + NetworkMode string + ShmSize int64 + Dockerfile string + Ulimits []*units.Ulimit + // BuildArgs needs to be a *string instead of just a string so that + // we can tell the difference between "" (empty string) and no value + // at all (nil). See the parsing of buildArgs in + // api/server/router/build/build_routes.go for even more info. + BuildArgs map[string]*string + AuthConfigs map[string]AuthConfig + Context io.Reader + Labels map[string]string + // squash the resulting image's layers to the parent + // preserves the original image and creates a new one from the parent with all + // the changes applied to a single layer + Squash bool + // CacheFrom specifies images that are used for matching cache. Images + // specified here do not need to have a valid parent chain to match cache. + CacheFrom []string + SecurityOpt []string + ExtraHosts []string // List of extra hosts + Target string + SessionID string + Platform string + // Version specifies the version of the underlying builder to use + Version BuilderVersion + // BuildID is an optional identifier that can be passed together with the + // build request. The same identifier can be used to gracefully cancel the + // build with the cancel request. + BuildID string + // Outputs defines configurations for exporting build results. Only supported + // in BuildKit mode + Outputs []ImageBuildOutput +} + +// ImageBuildOutput defines configuration for exporting a build result +type ImageBuildOutput struct { + Type string + Attrs map[string]string +} + +// BuilderVersion sets the version of underlying builder to use +type BuilderVersion string + +const ( + // BuilderV1 is the first generation builder in docker daemon + BuilderV1 BuilderVersion = "1" + // BuilderBuildKit is builder based on moby/buildkit project + BuilderBuildKit BuilderVersion = "2" +) + +// ImageBuildResponse holds information +// returned by a server after building +// an image.
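The BuildArgs comment above encodes a subtle distinction: a nil *string means the ARG is passed with no value at all, which the builder treats differently from an empty string. A hedged sketch, assuming the same client package as in the earlier example and a hypothetical buildCtx io.Reader carrying a tar build context:

version := "1.2.3"
opts := types.ImageBuildOptions{
	Tags:       []string{"example/app:latest"},
	Dockerfile: "Dockerfile",
	Remove:     true, // remove intermediate containers after a successful build
	BuildArgs: map[string]*string{
		"VERSION": &version, // ARG VERSION receives "1.2.3"
		"UNSET":   nil,      // ARG UNSET is declared with no value (distinct from "")
	},
}
resp, err := cli.ImageBuild(context.Background(), buildCtx, opts)
if err != nil {
	panic(err)
}
defer resp.Body.Close() // the Body streams the build output and must be closed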
+type ImageBuildResponse struct { + Body io.ReadCloser + OSType string +} + +// ImageCreateOptions holds information to create images. +type ImageCreateOptions struct { + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry. + Platform string // Platform is the target platform of the image if it needs to be pulled from the registry. +} + +// ImageImportSource holds source information for ImageImport +type ImageImportSource struct { + Source io.Reader // Source is the data to send to the server to create this image from. You must set SourceName to "-" to leverage this. + SourceName string // SourceName is the name of the image to pull. Set to "-" to leverage the Source attribute. +} + +// ImageImportOptions holds information to import images from the client host. +type ImageImportOptions struct { + Tag string // Tag is the name to tag this image with. This attribute is deprecated. + Message string // Message is the message to tag the image with + Changes []string // Changes are the raw changes to apply to this image + Platform string // Platform is the target platform of the image +} + +// ImageListOptions holds parameters to filter the list of images with. +type ImageListOptions struct { + All bool + Filters filters.Args +} + +// ImageLoadResponse returns information to the client about a load process. +type ImageLoadResponse struct { + // Body must be closed to avoid a resource leak + Body io.ReadCloser + JSON bool +} + +// ImagePullOptions holds information to pull images. +type ImagePullOptions struct { + All bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + PrivilegeFunc RequestPrivilegeFunc + Platform string +} + +// RequestPrivilegeFunc is a function interface that +// clients can supply to retry operations after +// getting an authorization error. +// This function returns the registry authentication +// header value in base 64 format, or an error +// if the privilege request fails. +type RequestPrivilegeFunc func() (string, error) + +// ImagePushOptions holds information to push images. +type ImagePushOptions ImagePullOptions + +// ImageRemoveOptions holds parameters to remove images. +type ImageRemoveOptions struct { + Force bool + PruneChildren bool +} + +// ImageSearchOptions holds parameters to search images with. +type ImageSearchOptions struct { + RegistryAuth string + PrivilegeFunc RequestPrivilegeFunc + Filters filters.Args + Limit int +} + +// ResizeOptions holds parameters to resize a tty. +// It can be used to resize container ttys and +// exec process ttys too. +type ResizeOptions struct { + Height uint + Width uint +} + +// NodeListOptions holds parameters to list nodes with. +type NodeListOptions struct { + Filters filters.Args +} + +// NodeRemoveOptions holds parameters to remove nodes with. +type NodeRemoveOptions struct { + Force bool +} + +// ServiceCreateOptions contains the options to use when creating a service. +type ServiceCreateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. 
+ QueryRegistry bool +} + +// ServiceCreateResponse contains the information returned to a client +// on the creation of a new service. +type ServiceCreateResponse struct { + // ID is the ID of the created service. + ID string + // Warnings is a set of non-fatal warning messages to pass on to the user. + Warnings []string `json:",omitempty"` +} + +// Values for RegistryAuthFrom in ServiceUpdateOptions +const ( + RegistryAuthFromSpec = "spec" + RegistryAuthFromPreviousSpec = "previous-spec" +) + +// ServiceUpdateOptions contains the options to be used for updating services. +type ServiceUpdateOptions struct { + // EncodedRegistryAuth is the encoded registry authorization credentials to + // use when updating the service. + // + // This field follows the format of the X-Registry-Auth header. + EncodedRegistryAuth string + + // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate + // into this field. While it does open API users up to racy writes, most + // users may not need that level of consistency in practice. + + // RegistryAuthFrom specifies where to find the registry authorization + // credentials if they are not given in EncodedRegistryAuth. Valid + // values are "spec" and "previous-spec". + RegistryAuthFrom string + + // Rollback indicates whether a server-side rollback should be + // performed. When this is set, the provided spec will be ignored. + // The valid values are "previous" and "none". An empty value is the + // same as "none". + Rollback string + + // QueryRegistry indicates whether the service update requires + // contacting a registry. A registry may be contacted to retrieve + // the image digest and manifest, which in turn can be used to update + // platform or other information about the service. + QueryRegistry bool +} + +// ServiceListOptions holds parameters to list services with. +type ServiceListOptions struct { + Filters filters.Args + + // Status indicates whether the server should include the service task + // count of running and desired tasks. + Status bool +} + +// ServiceInspectOptions holds parameters related to the "service inspect" +// operation. +type ServiceInspectOptions struct { + InsertDefaults bool +} + +// TaskListOptions holds parameters to list tasks with. +type TaskListOptions struct { + Filters filters.Args +} + +// PluginRemoveOptions holds parameters to remove plugins. +type PluginRemoveOptions struct { + Force bool +} + +// PluginEnableOptions holds parameters to enable plugins. +type PluginEnableOptions struct { + Timeout int +} + +// PluginDisableOptions holds parameters to disable plugins. +type PluginDisableOptions struct { + Force bool +} + +// PluginInstallOptions holds parameters to install a plugin. +type PluginInstallOptions struct { + Disabled bool + AcceptAllPermissions bool + RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry + RemoteRef string // RemoteRef is the plugin name on the registry + PrivilegeFunc RequestPrivilegeFunc + AcceptPermissionsFunc func(PluginPrivileges) (bool, error) + Args []string +} + +// SwarmUnlockKeyResponse contains the response for Engine API: +// GET /swarm/unlockkey +type SwarmUnlockKeyResponse struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// PluginCreateOptions holds all options for plugin create.
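RegistryAuthFromSpec and RegistryAuthFromPreviousSpec above are the two accepted values for ServiceUpdateOptions.RegistryAuthFrom. A sketch of a swarm service update that reuses the credentials recorded with the previous spec — serviceID, version, and spec are assumed to come from an earlier ServiceInspectWithRaw call, and the snippet is illustrative rather than part of the vendored sources:

opts := types.ServiceUpdateOptions{
	// Reuse the registry credentials stored with the previous service spec.
	RegistryAuthFrom: types.RegistryAuthFromPreviousSpec,
	// Re-query the registry so the image digest can be refreshed.
	QueryRegistry: true,
}
resp, err := cli.ServiceUpdate(context.Background(), serviceID, version, spec, opts)
if err != nil {
	panic(err)
}
for _, w := range resp.Warnings {
	fmt.Println("service update warning:", w)
}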
+type PluginCreateOptions struct { + RepoName string +} diff --git a/vendor/github.com/docker/docker/api/types/configs.go b/vendor/github.com/docker/docker/api/types/configs.go new file mode 100644 index 0000000000..3dd133a3a5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/configs.go @@ -0,0 +1,66 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/network" + specs "github.com/opencontainers/image-spec/specs-go/v1" +) + +// configs holds structs used for internal communication between the +// frontend (such as an http server) and the backend (such as the +// docker daemon). + +// ContainerCreateConfig is the parameter set to ContainerCreate() +type ContainerCreateConfig struct { + Name string + Config *container.Config + HostConfig *container.HostConfig + NetworkingConfig *network.NetworkingConfig + Platform *specs.Platform + AdjustCPUShares bool +} + +// ContainerRmConfig holds arguments for the container remove +// operation. This struct is used to tell the backend what operations +// to perform. +type ContainerRmConfig struct { + ForceRemove, RemoveVolume, RemoveLink bool +} + +// ExecConfig is a small subset of the Config struct that holds the configuration +// for the exec feature of docker. +type ExecConfig struct { + User string // User that will run the command + Privileged bool // Is the container in privileged mode + Tty bool // Attach standard streams to a tty. + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStderr bool // Attach the standard error + AttachStdout bool // Attach the standard output + Detach bool // Execute in detach mode + DetachKeys string // Escape keys for detach + Env []string // Environment variables + WorkingDir string // Working directory + Cmd []string // Execution commands and args +} + +// PluginRmConfig holds arguments for plugin remove. +type PluginRmConfig struct { + ForceRemove bool +} + +// PluginEnableConfig holds arguments for plugin enable +type PluginEnableConfig struct { + Timeout int +} + +// PluginDisableConfig holds arguments for plugin disable. +type PluginDisableConfig struct { + ForceDisable bool +} + +// NetworkListConfig stores the options available for listing networks +type NetworkListConfig struct { + // TODO(@cpuguy83): naming is hard, this is pulled from what was being used in the router before moving here + Detailed bool + Verbose bool +} diff --git a/vendor/github.com/docker/docker/api/types/container/config.go b/vendor/github.com/docker/docker/api/types/container/config.go new file mode 100644 index 0000000000..f767195b94 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/config.go @@ -0,0 +1,69 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "time" + + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" +) + +// MinimumDuration puts a minimum on a user-configured duration. +// This is to prevent an API error caused by the time unit. For example, the API may +// set 3 as the healthcheck interval, intending 3 seconds, but +// Docker interprets it as 3 nanoseconds. +const MinimumDuration = 1 * time.Millisecond + +// HealthConfig holds configuration settings for the HEALTHCHECK feature.
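The duration fields in the HealthConfig below are time.Duration values counted in nanoseconds, which is exactly the pitfall the MinimumDuration guard above exists for. A minimal sketch of a well-formed HealthConfig using the CMD-SHELL form described below (the probe command and endpoint are placeholders):

hc := &container.HealthConfig{
	// CMD-SHELL wraps the probe in the system's default shell.
	Test:     []string{"CMD-SHELL", "curl -f http://localhost:8080/healthz || exit 1"},
	Interval: 30 * time.Second, // time between checks; a bare 3 would mean 3ns
	Timeout:  5 * time.Second,  // per-check deadline before the check counts as hung
	Retries:  3,                // consecutive failures before the container is unhealthy
}
_ = hc // typically assigned to a Config's Healthcheck field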
+type HealthConfig struct { + // Test is the test to perform to check that the container is healthy. + // An empty slice means to inherit the default. + // The options are: + // {} : inherit healthcheck + // {"NONE"} : disable healthcheck + // {"CMD", args...} : exec arguments directly + // {"CMD-SHELL", command} : run command with system's default shell + Test []string `json:",omitempty"` + + // Zero means to inherit. Durations are expressed as integer nanoseconds. + Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. + Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. + StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down. + + // Retries is the number of consecutive failures needed to consider a container as unhealthy. + // Zero means inherit. + Retries int `json:",omitempty"` +} + +// Config contains the configuration data about a container. +// It should hold only portable information about the container. +// Here, "portable" means "independent from the host we are running on". +// Non-portable information *should* appear in HostConfig. +// All fields added to this struct must be marked `omitempty` to keep getting +// predictable hashes from the old `v1Compatibility` configuration. +type Config struct { + Hostname string // Hostname + Domainname string // Domainname + User string // User that will run the command(s) inside the container, also supports user:group + AttachStdin bool // Attach the standard input, makes possible user interaction + AttachStdout bool // Attach the standard output + AttachStderr bool // Attach the standard error + ExposedPorts nat.PortSet `json:",omitempty"` // List of exposed ports + Tty bool // Attach standard streams to a tty, including stdin if it is not closed. + OpenStdin bool // Open stdin + StdinOnce bool // If true, close stdin after the first attached client disconnects. + Env []string // List of environment variables to set in the container + Cmd strslice.StrSlice // Command to run when starting the container + Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy + ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (meaning treat as a command line) (Windows specific). + Image string // Name of the image as it was passed by the operator (e.g.
could be symbolic) + Volumes map[string]struct{} // List of volumes (mounts) used for the container + WorkingDir string // Current directory (PWD) in which the command will be launched + Entrypoint strslice.StrSlice // Entrypoint to run when starting the container + NetworkDisabled bool `json:",omitempty"` // Is network disabled + MacAddress string `json:",omitempty"` // Mac Address of the container + OnBuild []string // ONBUILD metadata that was defined on the image Dockerfile + Labels map[string]string // List of labels set on this container + StopSignal string `json:",omitempty"` // Signal to stop a container + StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container + Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_changes.go b/vendor/github.com/docker/docker/api/types/container/container_changes.go new file mode 100644 index 0000000000..16dd5019ee --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_changes.go @@ -0,0 +1,20 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerChangeResponseItem change item in response to ContainerChanges operation +// swagger:model ContainerChangeResponseItem +type ContainerChangeResponseItem struct { + + // Kind of change + // Required: true + Kind uint8 `json:"Kind"` + + // Path to file that has changed + // Required: true + Path string `json:"Path"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_create.go b/vendor/github.com/docker/docker/api/types/container/container_create.go new file mode 100644 index 0000000000..d0c852f84d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_create.go @@ -0,0 +1,20 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerCreateCreatedBody OK response to ContainerCreate operation +// swagger:model ContainerCreateCreatedBody +type ContainerCreateCreatedBody struct { + + // The ID of the created container + // Required: true + ID string `json:"Id"` + + // Warnings encountered when creating the container + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_top.go b/vendor/github.com/docker/docker/api/types/container/container_top.go new file mode 100644 index 0000000000..63381da367 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_top.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT.
+// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerTopOKBody OK response to ContainerTop operation +// swagger:model ContainerTopOKBody +type ContainerTopOKBody struct { + + // Each process running in the container, where each process + // is an array of values corresponding to the titles. + // + // Required: true + Processes [][]string `json:"Processes"` + + // The ps column titles + // Required: true + Titles []string `json:"Titles"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_update.go b/vendor/github.com/docker/docker/api/types/container/container_update.go new file mode 100644 index 0000000000..c10f175ea8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_update.go @@ -0,0 +1,16 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT. +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerUpdateOKBody OK response to ContainerUpdate operation +// swagger:model ContainerUpdateOKBody +type ContainerUpdateOKBody struct { + + // warnings + // Required: true + Warnings []string `json:"Warnings"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/container_wait.go b/vendor/github.com/docker/docker/api/types/container/container_wait.go new file mode 100644 index 0000000000..49e05ae669 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/container_wait.go @@ -0,0 +1,28 @@ +package container // import "github.com/docker/docker/api/types/container" + +// ---------------------------------------------------------------------------- +// Code generated by `swagger generate operation`. DO NOT EDIT.
+// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// ContainerWaitOKBodyError container waiting error, if any +// swagger:model ContainerWaitOKBodyError +type ContainerWaitOKBodyError struct { + + // Details of an error + Message string `json:"Message,omitempty"` +} + +// ContainerWaitOKBody OK response to ContainerWait operation +// swagger:model ContainerWaitOKBody +type ContainerWaitOKBody struct { + + // error + // Required: true + Error *ContainerWaitOKBodyError `json:"Error"` + + // Exit code of the container + // Required: true + StatusCode int64 `json:"StatusCode"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/host_config.go b/vendor/github.com/docker/docker/api/types/container/host_config.go new file mode 100644 index 0000000000..2d1cbaa9ab --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/host_config.go @@ -0,0 +1,447 @@ +package container // import "github.com/docker/docker/api/types/container" + +import ( + "strings" + + "github.com/docker/docker/api/types/blkiodev" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/strslice" + "github.com/docker/go-connections/nat" + units "github.com/docker/go-units" +) + +// CgroupnsMode represents the cgroup namespace mode of the container +type CgroupnsMode string + +// IsPrivate indicates whether the container uses its own private cgroup namespace +func (c CgroupnsMode) IsPrivate() bool { + return c == "private" +} + +// IsHost indicates whether the container shares the host's cgroup namespace +func (c CgroupnsMode) IsHost() bool { + return c == "host" +} + +// IsEmpty indicates whether the container cgroup namespace mode is unset +func (c CgroupnsMode) IsEmpty() bool { + return c == "" +} + +// Valid indicates whether the cgroup namespace mode is valid +func (c CgroupnsMode) Valid() bool { + return c.IsEmpty() || c.IsPrivate() || c.IsHost() +} + +// Isolation represents the isolation technology of a container. The supported +// values are platform specific +type Isolation string + +// IsDefault indicates the default isolation technology of a container. On Linux this +// is the native driver. On Windows, this is a Windows Server Container. +func (i Isolation) IsDefault() bool { + return strings.ToLower(string(i)) == "default" || string(i) == "" +} + +// IsHyperV indicates the use of a Hyper-V partition for isolation +func (i Isolation) IsHyperV() bool { + return strings.ToLower(string(i)) == "hyperv" +} + +// IsProcess indicates the use of process isolation +func (i Isolation) IsProcess() bool { + return strings.ToLower(string(i)) == "process" +} + +const ( + // IsolationEmpty is unspecified (same behavior as default) + IsolationEmpty = Isolation("") + // IsolationDefault is the default isolation mode on current daemon + IsolationDefault = Isolation("default") + // IsolationProcess is process isolation mode + IsolationProcess = Isolation("process") + // IsolationHyperV is HyperV isolation mode + IsolationHyperV = Isolation("hyperv") +) + +// IpcMode represents the container ipc stack. +type IpcMode string + +// IsPrivate indicates whether the container uses its own private ipc namespace which can not be shared. +func (n IpcMode) IsPrivate() bool { + return n == "private" +} + +// IsHost indicates whether the container shares the host's ipc namespace. 
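All of these mode types are plain string aliases with predicate methods, so invalid values are representable and only rejected by Valid(). A few spot checks of the behavior defined above, runnable as-is against this package:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	fmt.Println(container.Isolation("").IsDefault())       // true: empty means default isolation
	fmt.Println(container.Isolation("HyperV").IsHyperV())  // true: the comparison lowercases first
	fmt.Println(container.CgroupnsMode("private").Valid()) // true
	fmt.Println(container.CgroupnsMode("bogus").Valid())   // false: not empty, private, or host
}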
+func (n IpcMode) IsHost() bool { + return n == "host" +} + +// IsShareable indicates whether the container's ipc namespace can be shared with another container. +func (n IpcMode) IsShareable() bool { + return n == "shareable" +} + +// IsContainer indicates whether the container uses another container's ipc namespace. +func (n IpcMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// IsNone indicates whether container IpcMode is set to "none". +func (n IpcMode) IsNone() bool { + return n == "none" +} + +// IsEmpty indicates whether container IpcMode is empty +func (n IpcMode) IsEmpty() bool { + return n == "" +} + +// Valid indicates whether the ipc mode is valid. +func (n IpcMode) Valid() bool { + return n.IsEmpty() || n.IsNone() || n.IsPrivate() || n.IsHost() || n.IsShareable() || n.IsContainer() +} + +// Container returns the name of the container whose ipc stack is going to be used. +func (n IpcMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 && parts[0] == "container" { + return parts[1] + } + return "" +} + +// NetworkMode represents the container network stack. +type NetworkMode string + +// IsNone indicates whether container isn't using a network stack. +func (n NetworkMode) IsNone() bool { + return n == "none" +} + +// IsDefault indicates whether container uses the default network stack. +func (n NetworkMode) IsDefault() bool { + return n == "default" +} + +// IsPrivate indicates whether container uses its private network stack. +func (n NetworkMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsContainer indicates whether container uses a container network stack. +func (n NetworkMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// ConnectedContainer is the id of the container whose network this container is connected to. +func (n NetworkMode) ConnectedContainer() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UserDefined indicates user-created network +func (n NetworkMode) UserDefined() string { + if n.IsUserDefined() { + return string(n) + } + return "" +} + +// UsernsMode represents userns mode in the container. +type UsernsMode string + +// IsHost indicates whether the container uses the host's userns. +func (n UsernsMode) IsHost() bool { + return n == "host" +} + +// IsPrivate indicates whether the container uses a private userns. +func (n UsernsMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// Valid indicates whether the userns is valid. +func (n UsernsMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// CgroupSpec represents the cgroup to use for the container. +type CgroupSpec string + +// IsContainer indicates whether the container is using another container cgroup +func (c CgroupSpec) IsContainer() bool { + parts := strings.SplitN(string(c), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the cgroup spec is valid. +func (c CgroupSpec) Valid() bool { + return c.IsContainer() || c == "" +} + +// Container returns the name of the container whose cgroup will be used.
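IpcMode, NetworkMode, and CgroupSpec all share the "container:<name|id>" convention parsed with strings.SplitN above. A small runnable sketch using NetworkMode (IsPrivate relies on IsHost, which this package defines in its platform-specific files):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/container"
)

func main() {
	m := container.NetworkMode("container:web")
	fmt.Println(m.IsContainer())        // true: "container:" prefix detected
	fmt.Println(m.ConnectedContainer()) // "web": everything after the first colon
	// A named network such as "bridge" is private: neither the host's
	// stack nor another container's.
	fmt.Println(container.NetworkMode("bridge").IsPrivate()) // true
}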
+func (c CgroupSpec) Container() string { + parts := strings.SplitN(string(c), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// UTSMode represents the UTS namespace of the container. +type UTSMode string + +// IsPrivate indicates whether the container uses its private UTS namespace. +func (n UTSMode) IsPrivate() bool { + return !(n.IsHost()) +} + +// IsHost indicates whether the container uses the host's UTS namespace. +func (n UTSMode) IsHost() bool { + return n == "host" +} + +// Valid indicates whether the UTS namespace is valid. +func (n UTSMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + default: + return false + } + return true +} + +// PidMode represents the pid namespace of the container. +type PidMode string + +// IsPrivate indicates whether the container uses its own new pid namespace. +func (n PidMode) IsPrivate() bool { + return !(n.IsHost() || n.IsContainer()) +} + +// IsHost indicates whether the container uses the host's pid namespace. +func (n PidMode) IsHost() bool { + return n == "host" +} + +// IsContainer indicates whether the container uses a container's pid namespace. +func (n PidMode) IsContainer() bool { + parts := strings.SplitN(string(n), ":", 2) + return len(parts) > 1 && parts[0] == "container" +} + +// Valid indicates whether the pid namespace is valid. +func (n PidMode) Valid() bool { + parts := strings.Split(string(n), ":") + switch mode := parts[0]; mode { + case "", "host": + case "container": + if len(parts) != 2 || parts[1] == "" { + return false + } + default: + return false + } + return true +} + +// Container returns the name of the container whose pid namespace is going to be used. +func (n PidMode) Container() string { + parts := strings.SplitN(string(n), ":", 2) + if len(parts) > 1 { + return parts[1] + } + return "" +} + +// DeviceRequest represents a request for devices from a device driver. +// Used by GPU device drivers. +type DeviceRequest struct { + Driver string // Name of device driver + Count int // Number of devices to request (-1 = All) + DeviceIDs []string // List of device IDs as recognizable by the device driver + Capabilities [][]string // An OR list of AND lists of device capabilities (e.g. "gpu") + Options map[string]string // Options to pass onto the device driver +} + +// DeviceMapping represents the device mapping between the host and the container. +type DeviceMapping struct { + PathOnHost string + PathInContainer string + CgroupPermissions string +} + +// RestartPolicy represents the restart policies of the container. +type RestartPolicy struct { + Name string + MaximumRetryCount int +} + +// IsNone indicates whether the container has the "no" restart policy. +// This means the container will not automatically restart when exiting. +func (rp *RestartPolicy) IsNone() bool { + return rp.Name == "no" || rp.Name == "" +} + +// IsAlways indicates whether the container has the "always" restart policy. +// This means the container will automatically restart regardless of the exit status. +func (rp *RestartPolicy) IsAlways() bool { + return rp.Name == "always" +} + +// IsOnFailure indicates whether the container has the "on-failure" restart policy. +// This means the container will automatically restart when exiting with a non-zero exit status. +func (rp *RestartPolicy) IsOnFailure() bool { + return rp.Name == "on-failure" +} + +// IsUnlessStopped indicates whether the container has the +// "unless-stopped" restart policy.
This means the container will +// automatically restart unless user has put it to stopped state. +func (rp *RestartPolicy) IsUnlessStopped() bool { + return rp.Name == "unless-stopped" +} + +// IsSame compares two RestartPolicy to see if they are the same +func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { + return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount +} + +// LogMode is a type to define the available modes for logging +// These modes affect how logs are handled when log messages start piling up. +type LogMode string + +// Available logging modes +const ( + LogModeUnset = "" + LogModeBlocking LogMode = "blocking" + LogModeNonBlock LogMode = "non-blocking" +) + +// LogConfig represents the logging configuration of the container. +type LogConfig struct { + Type string + Config map[string]string +} + +// Resources contains container's resources (cgroups config, ulimits...) +type Resources struct { + // Applicable to all platforms + CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) + Memory int64 // Memory limit (in bytes) + NanoCPUs int64 `json:"NanoCpus"` // CPU quota in units of 10-9 CPUs. + + // Applicable to UNIX platforms + CgroupParent string // Parent cgroup. + BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) + BlkioWeightDevice []*blkiodev.WeightDevice + BlkioDeviceReadBps []*blkiodev.ThrottleDevice + BlkioDeviceWriteBps []*blkiodev.ThrottleDevice + BlkioDeviceReadIOps []*blkiodev.ThrottleDevice + BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice + CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period + CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota + CPURealtimePeriod int64 `json:"CpuRealtimePeriod"` // CPU real-time period + CPURealtimeRuntime int64 `json:"CpuRealtimeRuntime"` // CPU real-time runtime + CpusetCpus string // CpusetCpus 0-2, 0,1 + CpusetMems string // CpusetMems 0-2, 0,1 + Devices []DeviceMapping // List of devices to map inside the container + DeviceCgroupRules []string // List of rule to be added to the device cgroup + DeviceRequests []DeviceRequest // List of device requests for device drivers + KernelMemory int64 // Kernel memory limit (in bytes), Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP int64 // Hard limit for kernel TCP buffer memory (in bytes) + MemoryReservation int64 // Memory soft limit (in bytes) + MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap + MemorySwappiness *int64 // Tuning container memory swappiness behaviour + OomKillDisable *bool // Whether to disable OOM Killer or not + PidsLimit *int64 // Setting PIDs limit for a container; Set `0` or `-1` for unlimited, or `null` to not change. + Ulimits []*units.Ulimit // List of ulimits to be set in the container + + // Applicable to Windows + CPUCount int64 `json:"CpuCount"` // CPU count + CPUPercent int64 `json:"CpuPercent"` // CPU percent + IOMaximumIOps uint64 // Maximum IOps for the container system drive + IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive +} + +// UpdateConfig holds the mutable attributes of a Container. +// Those attributes can be updated at runtime. +type UpdateConfig struct { + // Contains container's resources (cgroups, ulimits) + Resources + RestartPolicy RestartPolicy +} + +// HostConfig the non-portable Config structure of a container. +// Here, "non-portable" means "dependent of the host we are running on". 
+// Portable information *should* appear in Config. +type HostConfig struct { + // Applicable to all platforms + Binds []string // List of volume bindings for this container + ContainerIDFile string // File (path) where the containerId is written + LogConfig LogConfig // Configuration of the logs for this container + NetworkMode NetworkMode // Network mode to use for the container + PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host + RestartPolicy RestartPolicy // Restart policy to be used for the container + AutoRemove bool // Automatically remove container when it exits + VolumeDriver string // Name of the volume driver used to mount volumes + VolumesFrom []string // List of volumes to take from other container + + // Applicable to UNIX platforms + CapAdd strslice.StrSlice // List of kernel capabilities to add to the container + CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container + CgroupnsMode CgroupnsMode // Cgroup namespace mode to use for the container + DNS []string `json:"Dns"` // List of DNS server to lookup + DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for + DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for + ExtraHosts []string // List of extra hosts + GroupAdd []string // List of additional groups that the container process will run as + IpcMode IpcMode // IPC namespace to use for the container + Cgroup CgroupSpec // Cgroup to use for the container + Links []string // List of links (in the name:alias form) + OomScoreAdj int // Container preference for OOM-killing + PidMode PidMode // PID namespace to use for the container + Privileged bool // Is the container in privileged mode + PublishAllPorts bool // Should docker publish all exposed port for the container + ReadonlyRootfs bool // Is the container root filesystem in read-only + SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. + StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. + Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container + UTSMode UTSMode // UTS namespace to use for the container + UsernsMode UsernsMode // The user namespace to use for the container + ShmSize int64 // Total shm memory usage + Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container + Runtime string `json:",omitempty"` // Runtime to use with this container + + // Applicable to Windows + ConsoleSize [2]uint // Initial console size (height,width) + Isolation Isolation // Isolation technology of the container (e.g. 
default, hyperv) + + // Contains container's resources (cgroups, ulimits) + Resources + + // Mounts specs used by the container + Mounts []mount.Mount `json:",omitempty"` + + // MaskedPaths is the list of paths to be masked inside the container (this overrides the default set of paths) + MaskedPaths []string + + // ReadonlyPaths is the list of paths to be set as read-only inside the container (this overrides the default set of paths) + ReadonlyPaths []string + + // Run a custom init inside the container, if null, use the daemon's configured settings + Init *bool `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go new file mode 100644 index 0000000000..cf6fdf4402 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_unix.go @@ -0,0 +1,41 @@ +// +build !windows + +package container // import "github.com/docker/docker/api/types/container" + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() +} + +// NetworkName returns the name of the network stack. +func (n NetworkMode) NetworkName() string { + if n.IsBridge() { + return "bridge" + } else if n.IsHost() { + return "host" + } else if n.IsContainer() { + return "container" + } else if n.IsNone() { + return "none" + } else if n.IsDefault() { + return "default" + } else if n.IsUserDefined() { + return n.UserDefined() + } + return "" +} + +// IsBridge indicates whether container uses the bridge network stack +func (n NetworkMode) IsBridge() bool { + return n == "bridge" +} + +// IsHost indicates whether container uses the host network stack. +func (n NetworkMode) IsHost() bool { + return n == "host" +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() +} diff --git a/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go new file mode 100644 index 0000000000..99f803a5bb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/hostconfig_windows.go @@ -0,0 +1,40 @@ +package container // import "github.com/docker/docker/api/types/container" + +// IsBridge indicates whether container uses the bridge network stack +// in windows it is given the name NAT +func (n NetworkMode) IsBridge() bool { + return n == "nat" +} + +// IsHost indicates whether container uses the host network stack. +// returns false as this is not supported by windows +func (n NetworkMode) IsHost() bool { + return false +} + +// IsUserDefined indicates user-created network +func (n NetworkMode) IsUserDefined() bool { + return !n.IsDefault() && !n.IsNone() && !n.IsBridge() && !n.IsContainer() +} + +// IsValid indicates if an isolation technology is valid +func (i Isolation) IsValid() bool { + return i.IsDefault() || i.IsHyperV() || i.IsProcess() +} + +// NetworkName returns the name of the network stack. 
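
Taken together, the NetworkMode predicates in the two platform files above implement a small grammar over mode strings: a handful of reserved words, a "container:<id>" form, and everything else treated as the name of a user-defined network. The standalone sketch below is an editor's illustration, not part of the vendored code (the helper name networkName is ours); it mirrors the Unix-side rules, whereas on Windows the same API reports "nat" for the bridge case and never reports host mode.

package main

import (
	"fmt"
	"strings"
)

// networkName mirrors the Unix rules above: reserved words map to
// themselves, "container:<id>" collapses to "container", and anything
// else is assumed to be a user-defined network and returned verbatim.
func networkName(mode string) string {
	switch {
	case mode == "default" || mode == "bridge" || mode == "host" || mode == "none":
		return mode
	case strings.HasPrefix(mode, "container:"):
		return "container"
	default:
		return mode
	}
}

func main() {
	for _, m := range []string{"default", "host", "container:db", "my-overlay"} {
		fmt.Printf("%-13s -> %q\n", m, networkName(m))
	}
}
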
+func (n NetworkMode) NetworkName() string { + if n.IsDefault() { + return "default" + } else if n.IsBridge() { + return "nat" + } else if n.IsNone() { + return "none" + } else if n.IsContainer() { + return "container" + } else if n.IsUserDefined() { + return n.UserDefined() + } + + return "" +} diff --git a/vendor/github.com/docker/docker/api/types/container/waitcondition.go b/vendor/github.com/docker/docker/api/types/container/waitcondition.go new file mode 100644 index 0000000000..cd8311f99c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/container/waitcondition.go @@ -0,0 +1,22 @@ +package container // import "github.com/docker/docker/api/types/container" + +// WaitCondition is a type used to specify a container state for which +// to wait. +type WaitCondition string + +// Possible WaitCondition Values. +// +// WaitConditionNotRunning (default) is used to wait for any of the non-running +// states: "created", "exited", "dead", "removing", or "removed". +// +// WaitConditionNextExit is used to wait for the next time the state changes +// to a non-running state. If the state is currently "created" or "exited", +// this would cause Wait() to block until either the container runs and exits +// or is removed. +// +// WaitConditionRemoved is used to wait for the container to be removed. +const ( + WaitConditionNotRunning WaitCondition = "not-running" + WaitConditionNextExit WaitCondition = "next-exit" + WaitConditionRemoved WaitCondition = "removed" +) diff --git a/vendor/github.com/docker/docker/api/types/error_response.go b/vendor/github.com/docker/docker/api/types/error_response.go new file mode 100644 index 0000000000..dc942d9d9e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ErrorResponse Represents an error. +// swagger:model ErrorResponse +type ErrorResponse struct { + + // The error message. + // Required: true + Message string `json:"message"` +} diff --git a/vendor/github.com/docker/docker/api/types/error_response_ext.go b/vendor/github.com/docker/docker/api/types/error_response_ext.go new file mode 100644 index 0000000000..f84f034cd5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/error_response_ext.go @@ -0,0 +1,6 @@ +package types + +// Error returns the error message +func (e ErrorResponse) Error() string { + return e.Message +} diff --git a/vendor/github.com/docker/docker/api/types/filters/parse.go b/vendor/github.com/docker/docker/api/types/filters/parse.go new file mode 100644 index 0000000000..4bc91cffd6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/filters/parse.go @@ -0,0 +1,324 @@ +/*Package filters provides tools for encoding a mapping of keys to a set of +multiple values. +*/ +package filters // import "github.com/docker/docker/api/types/filters" + +import ( + "encoding/json" + "regexp" + "strings" + + "github.com/docker/docker/api/types/versions" +) + +// Args stores a mapping of keys to a set of multiple values. 
+type Args struct { + fields map[string]map[string]bool +} + +// KeyValuePair are used to initialize a new Args +type KeyValuePair struct { + Key string + Value string +} + +// Arg creates a new KeyValuePair for initializing Args +func Arg(key, value string) KeyValuePair { + return KeyValuePair{Key: key, Value: value} +} + +// NewArgs returns a new Args populated with the initial args +func NewArgs(initialArgs ...KeyValuePair) Args { + args := Args{fields: map[string]map[string]bool{}} + for _, arg := range initialArgs { + args.Add(arg.Key, arg.Value) + } + return args +} + +// Keys returns all the keys in list of Args +func (args Args) Keys() []string { + keys := make([]string, 0, len(args.fields)) + for k := range args.fields { + keys = append(keys, k) + } + return keys +} + +// MarshalJSON returns a JSON byte representation of the Args +func (args Args) MarshalJSON() ([]byte, error) { + if len(args.fields) == 0 { + return []byte{}, nil + } + return json.Marshal(args.fields) +} + +// ToJSON returns the Args as a JSON encoded string +func ToJSON(a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + buf, err := json.Marshal(a) + return string(buf), err +} + +// ToParamWithVersion encodes Args as a JSON string. If version is less than 1.22 +// then the encoded format will use an older legacy format where the values are a +// list of strings, instead of a set. +// +// Deprecated: do not use in any new code; use ToJSON instead +func ToParamWithVersion(version string, a Args) (string, error) { + if a.Len() == 0 { + return "", nil + } + + if version != "" && versions.LessThan(version, "1.22") { + buf, err := json.Marshal(convertArgsToSlice(a.fields)) + return string(buf), err + } + + return ToJSON(a) +} + +// FromJSON decodes a JSON encoded string into Args +func FromJSON(p string) (Args, error) { + args := NewArgs() + + if p == "" { + return args, nil + } + + raw := []byte(p) + err := json.Unmarshal(raw, &args) + if err == nil { + return args, nil + } + + // Fallback to parsing arguments in the legacy slice format + deprecated := map[string][]string{} + if legacyErr := json.Unmarshal(raw, &deprecated); legacyErr != nil { + return args, err + } + + args.fields = deprecatedArgs(deprecated) + return args, nil +} + +// UnmarshalJSON populates the Args from JSON encode bytes +func (args Args) UnmarshalJSON(raw []byte) error { + if len(raw) == 0 { + return nil + } + return json.Unmarshal(raw, &args.fields) +} + +// Get returns the list of values associated with the key +func (args Args) Get(key string) []string { + values := args.fields[key] + if values == nil { + return make([]string, 0) + } + slice := make([]string, 0, len(values)) + for key := range values { + slice = append(slice, key) + } + return slice +} + +// Add a new value to the set of values +func (args Args) Add(key, value string) { + if _, ok := args.fields[key]; ok { + args.fields[key][value] = true + } else { + args.fields[key] = map[string]bool{value: true} + } +} + +// Del removes a value from the set +func (args Args) Del(key, value string) { + if _, ok := args.fields[key]; ok { + delete(args.fields[key], value) + if len(args.fields[key]) == 0 { + delete(args.fields, key) + } + } +} + +// Len returns the number of keys in the mapping +func (args Args) Len() int { + return len(args.fields) +} + +// MatchKVList returns true if all the pairs in sources exist as key=value +// pairs in the mapping at key, or if there are no values at key. 
+func (args Args) MatchKVList(key string, sources map[string]string) bool { + fieldValues := args.fields[key] + + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + + if len(sources) == 0 { + return false + } + + for value := range fieldValues { + testKV := strings.SplitN(value, "=", 2) + + v, ok := sources[testKV[0]] + if !ok { + return false + } + if len(testKV) == 2 && testKV[1] != v { + return false + } + } + + return true +} + +// Match returns true if any of the values at key match the source string +func (args Args) Match(field, source string) bool { + if args.ExactMatch(field, source) { + return true + } + + fieldValues := args.fields[field] + for name2match := range fieldValues { + match, err := regexp.MatchString(name2match, source) + if err != nil { + continue + } + if match { + return true + } + } + return false +} + +// ExactMatch returns true if the source matches exactly one of the values. +func (args Args) ExactMatch(key, source string) bool { + fieldValues, ok := args.fields[key] + // do not filter if there is no filter set or cannot determine filter + if !ok || len(fieldValues) == 0 { + return true + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// UniqueExactMatch returns true if there is only one value and the source +// matches exactly the value. +func (args Args) UniqueExactMatch(key, source string) bool { + fieldValues := args.fields[key] + // do not filter if there is no filter set or cannot determine filter + if len(fieldValues) == 0 { + return true + } + if len(args.fields[key]) != 1 { + return false + } + + // try to match full name value to avoid O(N) regular expression matching + return fieldValues[source] +} + +// FuzzyMatch returns true if the source matches exactly one value, or the +// source has one of the values as a prefix. +func (args Args) FuzzyMatch(key, source string) bool { + if args.ExactMatch(key, source) { + return true + } + + fieldValues := args.fields[key] + for prefix := range fieldValues { + if strings.HasPrefix(source, prefix) { + return true + } + } + return false +} + +// Contains returns true if the key exists in the mapping +func (args Args) Contains(field string) bool { + _, ok := args.fields[field] + return ok +} + +type invalidFilter string + +func (e invalidFilter) Error() string { + return "Invalid filter '" + string(e) + "'" +} + +func (invalidFilter) InvalidParameter() {} + +// Validate compared the set of accepted keys against the keys in the mapping. +// An error is returned if any mapping keys are not in the accepted set. +func (args Args) Validate(accepted map[string]bool) error { + for name := range args.fields { + if !accepted[name] { + return invalidFilter(name) + } + } + return nil +} + +// WalkValues iterates over the list of values for a key in the mapping and calls +// op() for each value. If op returns an error the iteration stops and the +// error is returned. +func (args Args) WalkValues(field string, op func(value string) error) error { + if _, ok := args.fields[field]; !ok { + return nil + } + for v := range args.fields[field] { + if err := op(v); err != nil { + return err + } + } + return nil +} + +// Clone returns a copy of args. 
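
MatchKVList and Match, defined just above and below this point, implement two different matching strategies: exact key=value set matching and a regexp fallback. A short sketch of both, under the same import-path assumption as before:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	args := filters.NewArgs(
		filters.Arg("label", "env=prod"), // must match both key and value
		filters.Arg("label", "tier"),     // bare key: any value accepted
	)
	labels := map[string]string{"env": "prod", "tier": "web"}

	// MatchKVList succeeds only if every stored label filter is
	// satisfied by the source map.
	fmt.Println(args.MatchKVList("label", labels)) // true

	// Match first tries an exact hit, then falls back to regexp
	// matching on the stored values.
	byName := filters.NewArgs(filters.Arg("name", "^web-"))
	fmt.Println(byName.Match("name", "web-frontend")) // true
}
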
+func (args Args) Clone() (newArgs Args) { + newArgs.fields = make(map[string]map[string]bool, len(args.fields)) + for k, m := range args.fields { + var mm map[string]bool + if m != nil { + mm = make(map[string]bool, len(m)) + for kk, v := range m { + mm[kk] = v + } + } + newArgs.fields[k] = mm + } + return newArgs +} + +func deprecatedArgs(d map[string][]string) map[string]map[string]bool { + m := map[string]map[string]bool{} + for k, v := range d { + values := map[string]bool{} + for _, vv := range v { + values[vv] = true + } + m[k] = values + } + return m +} + +func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { + m := map[string][]string{} + for k, v := range f { + values := []string{} + for kk := range v { + if v[kk] { + values = append(values, kk) + } + } + m[k] = values + } + return m +} diff --git a/vendor/github.com/docker/docker/api/types/graph_driver_data.go b/vendor/github.com/docker/docker/api/types/graph_driver_data.go new file mode 100644 index 0000000000..4d9bf1c62c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/graph_driver_data.go @@ -0,0 +1,17 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// GraphDriverData Information about a container's graph driver. +// swagger:model GraphDriverData +type GraphDriverData struct { + + // data + // Required: true + Data map[string]string `json:"Data"` + + // name + // Required: true + Name string `json:"Name"` +} diff --git a/vendor/github.com/docker/docker/api/types/id_response.go b/vendor/github.com/docker/docker/api/types/id_response.go new file mode 100644 index 0000000000..7592d2f8b1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/id_response.go @@ -0,0 +1,13 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// IDResponse Response to an API call that returns just an Id +// swagger:model IdResponse +type IDResponse struct { + + // The id of the newly created object. + // Required: true + ID string `json:"Id"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_delete_response_item.go b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go new file mode 100644 index 0000000000..b9a65a0d8e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_delete_response_item.go @@ -0,0 +1,15 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// ImageDeleteResponseItem image delete response item +// swagger:model ImageDeleteResponseItem +type ImageDeleteResponseItem struct { + + // The image ID of an image that was deleted + Deleted string `json:"Deleted,omitempty"` + + // The image ID of an image that was untagged + Untagged string `json:"Untagged,omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/image_summary.go b/vendor/github.com/docker/docker/api/types/image_summary.go new file mode 100644 index 0000000000..e145b3dcfc --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/image_summary.go @@ -0,0 +1,49 @@ +package types + +// This file was generated by the swagger tool. 
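
The deprecatedArgs and convertArgsToSlice helpers above exist to bridge the pre-1.22 wire format, where values were lists rather than sets. FromJSON accepts both encodings, as this sketch shows (errors elided for brevity):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

func main() {
	// Current format: map of value-sets.
	a, _ := filters.FromJSON(`{"label":{"env=prod":true}}`)

	// Legacy (pre-1.22) format: map of value-lists; FromJSON falls
	// back to it when the first decode fails.
	b, _ := filters.FromJSON(`{"label":["env=prod"]}`)

	fmt.Println(a.Get("label")) // [env=prod]
	fmt.Println(b.Get("label")) // [env=prod]
}
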
+// Editing this file might prove futile when you re-run the swagger generate command + +// ImageSummary image summary +// swagger:model ImageSummary +type ImageSummary struct { + + // containers + // Required: true + Containers int64 `json:"Containers"` + + // created + // Required: true + Created int64 `json:"Created"` + + // Id + // Required: true + ID string `json:"Id"` + + // labels + // Required: true + Labels map[string]string `json:"Labels"` + + // parent Id + // Required: true + ParentID string `json:"ParentId"` + + // repo digests + // Required: true + RepoDigests []string `json:"RepoDigests"` + + // repo tags + // Required: true + RepoTags []string `json:"RepoTags"` + + // shared size + // Required: true + SharedSize int64 `json:"SharedSize"` + + // size + // Required: true + Size int64 `json:"Size"` + + // virtual size + // Required: true + VirtualSize int64 `json:"VirtualSize"` +} diff --git a/vendor/github.com/docker/docker/api/types/mount/mount.go b/vendor/github.com/docker/docker/api/types/mount/mount.go new file mode 100644 index 0000000000..443b8d07a9 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/mount/mount.go @@ -0,0 +1,131 @@ +package mount // import "github.com/docker/docker/api/types/mount" + +import ( + "os" +) + +// Type represents the type of a mount. +type Type string + +// Type constants +const ( + // TypeBind is the type for mounting host dir + TypeBind Type = "bind" + // TypeVolume is the type for remote storage volumes + TypeVolume Type = "volume" + // TypeTmpfs is the type for mounting tmpfs + TypeTmpfs Type = "tmpfs" + // TypeNamedPipe is the type for mounting Windows named pipes + TypeNamedPipe Type = "npipe" +) + +// Mount represents a mount (volume). +type Mount struct { + Type Type `json:",omitempty"` + // Source specifies the name of the mount. Depending on mount type, this + // may be a volume name or a host path, or even ignored. + // Source is not supported for tmpfs (must be an empty value) + Source string `json:",omitempty"` + Target string `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Consistency Consistency `json:",omitempty"` + + BindOptions *BindOptions `json:",omitempty"` + VolumeOptions *VolumeOptions `json:",omitempty"` + TmpfsOptions *TmpfsOptions `json:",omitempty"` +} + +// Propagation represents the propagation of a mount. +type Propagation string + +const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate Propagation = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate Propagation = "private" + // PropagationRShared RSHARED + PropagationRShared Propagation = "rshared" + // PropagationShared SHARED + PropagationShared Propagation = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave Propagation = "rslave" + // PropagationSlave SLAVE + PropagationSlave Propagation = "slave" +) + +// Propagations is the list of all valid mount propagations +var Propagations = []Propagation{ + PropagationRPrivate, + PropagationPrivate, + PropagationRShared, + PropagationShared, + PropagationRSlave, + PropagationSlave, +} + +// Consistency represents the consistency requirements of a mount. 
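
For orientation, a typical mount.Mount literal built from the types in this file; the BindOptions type and its Propagation field are defined just below this point:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/mount"
)

func main() {
	m := mount.Mount{
		Type:     mount.TypeBind,
		Source:   "/var/log", // host path for bind mounts
		Target:   "/logs",    // path inside the container
		ReadOnly: true,
		BindOptions: &mount.BindOptions{
			Propagation: mount.PropagationRPrivate,
		},
	}
	fmt.Printf("%s mount %s -> %s (ro=%v)\n", m.Type, m.Source, m.Target, m.ReadOnly)
}
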
+type Consistency string + +const ( + // ConsistencyFull guarantees bind mount-like consistency + ConsistencyFull Consistency = "consistent" + // ConsistencyCached mounts can cache read data and FS structure + ConsistencyCached Consistency = "cached" + // ConsistencyDelegated mounts can cache read and written data and structure + ConsistencyDelegated Consistency = "delegated" + // ConsistencyDefault provides "consistent" behavior unless overridden + ConsistencyDefault Consistency = "default" +) + +// BindOptions defines options specific to mounts of type "bind". +type BindOptions struct { + Propagation Propagation `json:",omitempty"` + NonRecursive bool `json:",omitempty"` +} + +// VolumeOptions represents the options for a mount of type volume. +type VolumeOptions struct { + NoCopy bool `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + DriverConfig *Driver `json:",omitempty"` +} + +// Driver represents a volume driver. +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TmpfsOptions defines options specific to mounts of type "tmpfs". +type TmpfsOptions struct { + // Size sets the size of the tmpfs, in bytes. + // + // This will be converted to an operating system specific value + // depending on the host. For example, on linux, it will be converted to + // use a 'k', 'm' or 'g' syntax. BSD, though not widely supported with + // docker, uses a straight byte value. + // + // Percentages are not supported. + SizeBytes int64 `json:",omitempty"` + // Mode of the tmpfs upon creation + Mode os.FileMode `json:",omitempty"` + + // TODO(stevvooe): There are several more tmpfs flags, specified in the + // daemon, that are accepted. Only the most basic are added for now. + // + // From https://github.com/moby/sys/blob/mount/v0.1.1/mount/flags.go#L47-L56 + // + // var validFlags = map[string]bool{ + // "": true, + // "size": true, X + // "mode": true, X + // "uid": true, + // "gid": true, + // "nr_inodes": true, + // "nr_blocks": true, + // "mpol": true, + // } + // + // Some of these may be straightforward to add, but others, such as + // uid/gid have implications in a clustered system. 
+} diff --git a/vendor/github.com/docker/docker/api/types/network/network.go b/vendor/github.com/docker/docker/api/types/network/network.go new file mode 100644 index 0000000000..437b184c67 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/network/network.go @@ -0,0 +1,126 @@ +package network // import "github.com/docker/docker/api/types/network" +import ( + "github.com/docker/docker/api/types/filters" +) + +// Address represents an IP address +type Address struct { + Addr string + PrefixLen int +} + +// IPAM represents IP Address Management +type IPAM struct { + Driver string + Options map[string]string // Per network IPAM driver options + Config []IPAMConfig +} + +// IPAMConfig represents IPAM configurations +type IPAMConfig struct { + Subnet string `json:",omitempty"` + IPRange string `json:",omitempty"` + Gateway string `json:",omitempty"` + AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` +} + +// EndpointIPAMConfig represents IPAM configurations for the endpoint +type EndpointIPAMConfig struct { + IPv4Address string `json:",omitempty"` + IPv6Address string `json:",omitempty"` + LinkLocalIPs []string `json:",omitempty"` +} + +// Copy makes a copy of the endpoint ipam config +func (cfg *EndpointIPAMConfig) Copy() *EndpointIPAMConfig { + cfgCopy := *cfg + cfgCopy.LinkLocalIPs = make([]string, 0, len(cfg.LinkLocalIPs)) + cfgCopy.LinkLocalIPs = append(cfgCopy.LinkLocalIPs, cfg.LinkLocalIPs...) + return &cfgCopy +} + +// PeerInfo represents one peer of an overlay network +type PeerInfo struct { + Name string + IP string +} + +// EndpointSettings stores the network endpoint details +type EndpointSettings struct { + // Configurations + IPAMConfig *EndpointIPAMConfig + Links []string + Aliases []string + // Operational data + NetworkID string + EndpointID string + Gateway string + IPAddress string + IPPrefixLen int + IPv6Gateway string + GlobalIPv6Address string + GlobalIPv6PrefixLen int + MacAddress string + DriverOpts map[string]string +} + +// Task carries the information about one backend task +type Task struct { + Name string + EndpointID string + EndpointIP string + Info map[string]string +} + +// ServiceInfo represents service parameters with the list of service's tasks +type ServiceInfo struct { + VIP string + Ports []string + LocalLBIndex int + Tasks []Task +} + +// Copy makes a deep copy of `EndpointSettings` +func (es *EndpointSettings) Copy() *EndpointSettings { + epCopy := *es + if es.IPAMConfig != nil { + epCopy.IPAMConfig = es.IPAMConfig.Copy() + } + + if es.Links != nil { + links := make([]string, 0, len(es.Links)) + epCopy.Links = append(links, es.Links...) + } + + if es.Aliases != nil { + aliases := make([]string, 0, len(es.Aliases)) + epCopy.Aliases = append(aliases, es.Aliases...) 
+ } + return &epCopy +} + +// NetworkingConfig represents the container's networking configuration for each of its interfaces +// Carries the networking configs specified in the `docker run` and `docker network connect` commands +type NetworkingConfig struct { + EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network +} + +// ConfigReference specifies the source which provides a network's configuration +type ConfigReference struct { + Network string +} + +var acceptedFilters = map[string]bool{ + "dangling": true, + "driver": true, + "id": true, + "label": true, + "name": true, + "scope": true, + "type": true, +} + +// ValidateFilters validates the list of filter args with the available filters. +func ValidateFilters(filter filters.Args) error { + return filter.Validate(acceptedFilters) +} diff --git a/vendor/github.com/docker/docker/api/types/plugin.go b/vendor/github.com/docker/docker/api/types/plugin.go new file mode 100644 index 0000000000..abae48b9ab --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin.go @@ -0,0 +1,203 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Plugin A plugin for the Engine API +// swagger:model Plugin +type Plugin struct { + + // config + // Required: true + Config PluginConfig `json:"Config"` + + // True if the plugin is running. False if the plugin is not running, only installed. + // Required: true + Enabled bool `json:"Enabled"` + + // Id + ID string `json:"Id,omitempty"` + + // name + // Required: true + Name string `json:"Name"` + + // plugin remote reference used to push/pull the plugin + PluginReference string `json:"PluginReference,omitempty"` + + // settings + // Required: true + Settings PluginSettings `json:"Settings"` +} + +// PluginConfig The config of a plugin. 
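
ValidateFilters above rejects any filter key outside acceptedFilters. A two-case demonstration, assuming only the vendored import paths:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
	"github.com/docker/docker/api/types/network"
)

func main() {
	ok := filters.NewArgs(filters.Arg("driver", "bridge"))
	fmt.Println(network.ValidateFilters(ok)) // <nil>

	bad := filters.NewArgs(filters.Arg("color", "blue"))
	fmt.Println(network.ValidateFilters(bad)) // Invalid filter 'color'
}
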
+// swagger:model PluginConfig +type PluginConfig struct { + + // args + // Required: true + Args PluginConfigArgs `json:"Args"` + + // description + // Required: true + Description string `json:"Description"` + + // Docker Version used to create the plugin + DockerVersion string `json:"DockerVersion,omitempty"` + + // documentation + // Required: true + Documentation string `json:"Documentation"` + + // entrypoint + // Required: true + Entrypoint []string `json:"Entrypoint"` + + // env + // Required: true + Env []PluginEnv `json:"Env"` + + // interface + // Required: true + Interface PluginConfigInterface `json:"Interface"` + + // ipc host + // Required: true + IpcHost bool `json:"IpcHost"` + + // linux + // Required: true + Linux PluginConfigLinux `json:"Linux"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` + + // network + // Required: true + Network PluginConfigNetwork `json:"Network"` + + // pid host + // Required: true + PidHost bool `json:"PidHost"` + + // propagated mount + // Required: true + PropagatedMount string `json:"PropagatedMount"` + + // user + User PluginConfigUser `json:"User,omitempty"` + + // work dir + // Required: true + WorkDir string `json:"WorkDir"` + + // rootfs + Rootfs *PluginConfigRootfs `json:"rootfs,omitempty"` +} + +// PluginConfigArgs plugin config args +// swagger:model PluginConfigArgs +type PluginConfigArgs struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value []string `json:"Value"` +} + +// PluginConfigInterface The interface between Docker and the plugin +// swagger:model PluginConfigInterface +type PluginConfigInterface struct { + + // Protocol to use for clients connecting to the plugin. + ProtocolScheme string `json:"ProtocolScheme,omitempty"` + + // socket + // Required: true + Socket string `json:"Socket"` + + // types + // Required: true + Types []PluginInterfaceType `json:"Types"` +} + +// PluginConfigLinux plugin config linux +// swagger:model PluginConfigLinux +type PluginConfigLinux struct { + + // allow all devices + // Required: true + AllowAllDevices bool `json:"AllowAllDevices"` + + // capabilities + // Required: true + Capabilities []string `json:"Capabilities"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` +} + +// PluginConfigNetwork plugin config network +// swagger:model PluginConfigNetwork +type PluginConfigNetwork struct { + + // type + // Required: true + Type string `json:"Type"` +} + +// PluginConfigRootfs plugin config rootfs +// swagger:model PluginConfigRootfs +type PluginConfigRootfs struct { + + // diff ids + DiffIds []string `json:"diff_ids"` + + // type + Type string `json:"type,omitempty"` +} + +// PluginConfigUser plugin config user +// swagger:model PluginConfigUser +type PluginConfigUser struct { + + // g ID + GID uint32 `json:"GID,omitempty"` + + // UID + UID uint32 `json:"UID,omitempty"` +} + +// PluginSettings Settings that can be modified by users. 
+// swagger:model PluginSettings +type PluginSettings struct { + + // args + // Required: true + Args []string `json:"Args"` + + // devices + // Required: true + Devices []PluginDevice `json:"Devices"` + + // env + // Required: true + Env []string `json:"Env"` + + // mounts + // Required: true + Mounts []PluginMount `json:"Mounts"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_device.go b/vendor/github.com/docker/docker/api/types/plugin_device.go new file mode 100644 index 0000000000..5699010675 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_device.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginDevice plugin device +// swagger:model PluginDevice +type PluginDevice struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // path + // Required: true + Path *string `json:"Path"` + + // settable + // Required: true + Settable []string `json:"Settable"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_env.go b/vendor/github.com/docker/docker/api/types/plugin_env.go new file mode 100644 index 0000000000..32962dc2eb --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_env.go @@ -0,0 +1,25 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginEnv plugin env +// swagger:model PluginEnv +type PluginEnv struct { + + // description + // Required: true + Description string `json:"Description"` + + // name + // Required: true + Name string `json:"Name"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // value + // Required: true + Value *string `json:"Value"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_interface_type.go b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go new file mode 100644 index 0000000000..c82f204e87 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_interface_type.go @@ -0,0 +1,21 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// PluginInterfaceType plugin interface type +// swagger:model PluginInterfaceType +type PluginInterfaceType struct { + + // capability + // Required: true + Capability string `json:"Capability"` + + // prefix + // Required: true + Prefix string `json:"Prefix"` + + // version + // Required: true + Version string `json:"Version"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_mount.go b/vendor/github.com/docker/docker/api/types/plugin_mount.go new file mode 100644 index 0000000000..5c031cf8b5 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_mount.go @@ -0,0 +1,37 @@ +package types + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +// PluginMount plugin mount +// swagger:model PluginMount +type PluginMount struct { + + // description + // Required: true + Description string `json:"Description"` + + // destination + // Required: true + Destination string `json:"Destination"` + + // name + // Required: true + Name string `json:"Name"` + + // options + // Required: true + Options []string `json:"Options"` + + // settable + // Required: true + Settable []string `json:"Settable"` + + // source + // Required: true + Source *string `json:"Source"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/plugin_responses.go b/vendor/github.com/docker/docker/api/types/plugin_responses.go new file mode 100644 index 0000000000..60d1fb5ad8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/plugin_responses.go @@ -0,0 +1,71 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "encoding/json" + "fmt" + "sort" +) + +// PluginsListResponse contains the response for the Engine API +type PluginsListResponse []*Plugin + +// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType +func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { + versionIndex := len(p) + prefixIndex := 0 + if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { + return fmt.Errorf("%q is not a plugin interface type", p) + } + p = p[1 : len(p)-1] +loop: + for i, b := range p { + switch b { + case '.': + prefixIndex = i + case '/': + versionIndex = i + break loop + } + } + t.Prefix = string(p[:prefixIndex]) + t.Capability = string(p[prefixIndex+1 : versionIndex]) + if versionIndex < len(p) { + t.Version = string(p[versionIndex+1:]) + } + return nil +} + +// MarshalJSON implements json.Marshaler for PluginInterfaceType +func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String implements fmt.Stringer for PluginInterfaceType +func (t PluginInterfaceType) String() string { + return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +type PluginPrivilege struct { + Name string + Description string + Value []string +} + +// PluginPrivileges is a list of PluginPrivilege +type PluginPrivileges []PluginPrivilege + +func (s PluginPrivileges) Len() int { + return len(s) +} + +func (s PluginPrivileges) Less(i, j int) bool { + return s[i].Name < s[j].Name +} + +func (s PluginPrivileges) Swap(i, j int) { + sort.Strings(s[i].Value) + sort.Strings(s[j].Value) + s[i], s[j] = s[j], s[i] +} diff --git a/vendor/github.com/docker/docker/api/types/port.go b/vendor/github.com/docker/docker/api/types/port.go new file mode 100644 index 0000000000..d91234744c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/port.go @@ -0,0 +1,23 @@ +package types + +// This file was generated by the swagger tool. 
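
The custom (un)marshalling for PluginInterfaceType above round-trips the "prefix.capability/version" wire form. For example:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	var t types.PluginInterfaceType
	// The wire format is "<prefix>.<capability>/<version>".
	if err := json.Unmarshal([]byte(`"docker.volumedriver/1.0"`), &t); err != nil {
		panic(err)
	}
	fmt.Println(t.Prefix, t.Capability, t.Version) // docker volumedriver 1.0
	fmt.Println(t.String())                        // docker.volumedriver/1.0
}
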
+// Editing this file might prove futile when you re-run the swagger generate command + +// Port An open port on a container +// swagger:model Port +type Port struct { + + // Host IP address that the container's port is mapped to + IP string `json:"IP,omitempty"` + + // Port on the container + // Required: true + PrivatePort uint16 `json:"PrivatePort"` + + // Port exposed on the host + PublicPort uint16 `json:"PublicPort,omitempty"` + + // type + // Required: true + Type string `json:"Type"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/authenticate.go b/vendor/github.com/docker/docker/api/types/registry/authenticate.go new file mode 100644 index 0000000000..f0a2113e40 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/authenticate.go @@ -0,0 +1,21 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +// ---------------------------------------------------------------------------- +// DO NOT EDIT THIS FILE +// This file was generated by `swagger generate operation` +// +// See hack/generate-swagger-api.sh +// ---------------------------------------------------------------------------- + +// AuthenticateOKBody authenticate o k body +// swagger:model AuthenticateOKBody +type AuthenticateOKBody struct { + + // An opaque token used to authenticate a user after a successful login + // Required: true + IdentityToken string `json:"IdentityToken"` + + // The status of the authentication + // Required: true + Status string `json:"Status"` +} diff --git a/vendor/github.com/docker/docker/api/types/registry/registry.go b/vendor/github.com/docker/docker/api/types/registry/registry.go new file mode 100644 index 0000000000..53e47084c8 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/registry/registry.go @@ -0,0 +1,119 @@ +package registry // import "github.com/docker/docker/api/types/registry" + +import ( + "encoding/json" + "net" + + v1 "github.com/opencontainers/image-spec/specs-go/v1" +) + +// ServiceConfig stores daemon registry services configuration. 
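
The Port model below is what docker ps renders, roughly, as 0.0.0.0:8080->80/tcp. The formatPort helper in this sketch is hypothetical (not part of the vendored API) and only illustrates the field semantics:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

// formatPort is a hypothetical helper rendering a Port roughly the way
// `docker ps` displays it; it is not part of the vendored API.
func formatPort(p types.Port) string {
	if p.PublicPort != 0 {
		return fmt.Sprintf("%s:%d->%d/%s", p.IP, p.PublicPort, p.PrivatePort, p.Type)
	}
	return fmt.Sprintf("%d/%s", p.PrivatePort, p.Type)
}

func main() {
	p := types.Port{IP: "0.0.0.0", PrivatePort: 80, PublicPort: 8080, Type: "tcp"}
	fmt.Println(formatPort(p)) // 0.0.0.0:8080->80/tcp
}
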
+type ServiceConfig struct { + AllowNondistributableArtifactsCIDRs []*NetIPNet + AllowNondistributableArtifactsHostnames []string + InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` + IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` + Mirrors []string +} + +// NetIPNet is the net.IPNet type, which can be marshalled and +// unmarshalled to JSON +type NetIPNet net.IPNet + +// String returns the CIDR notation of ipnet +func (ipnet *NetIPNet) String() string { + return (*net.IPNet)(ipnet).String() +} + +// MarshalJSON returns the JSON representation of the IPNet +func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { + return json.Marshal((*net.IPNet)(ipnet).String()) +} + +// UnmarshalJSON sets the IPNet from a byte array of JSON +func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { + var ipnetStr string + if err = json.Unmarshal(b, &ipnetStr); err == nil { + var cidr *net.IPNet + if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { + *ipnet = NetIPNet(*cidr) + } + } + return +} + +// IndexInfo contains information about a registry +// +// RepositoryInfo Examples: +// { +// "Index" : { +// "Name" : "docker.io", +// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], +// "Secure" : true, +// "Official" : true, +// }, +// "RemoteName" : "library/debian", +// "LocalName" : "debian", +// "CanonicalName" : "docker.io/debian" +// "Official" : true, +// } +// +// { +// "Index" : { +// "Name" : "127.0.0.1:5000", +// "Mirrors" : [], +// "Secure" : false, +// "Official" : false, +// }, +// "RemoteName" : "user/repo", +// "LocalName" : "127.0.0.1:5000/user/repo", +// "CanonicalName" : "127.0.0.1:5000/user/repo", +// "Official" : false, +// } +type IndexInfo struct { + // Name is the name of the registry, such as "docker.io" + Name string + // Mirrors is a list of mirrors, expressed as URIs + Mirrors []string + // Secure is set to false if the registry is part of the list of + // insecure registries. Insecure registries accept HTTP and/or accept + // HTTPS with certificates from unknown CAs. + Secure bool + // Official indicates whether this is an official registry + Official bool +} + +// SearchResult describes a search result returned from a registry +type SearchResult struct { + // StarCount indicates the number of stars this repository has + StarCount int `json:"star_count"` + // IsOfficial is true if the result is from an official repository. 
+	IsOfficial bool `json:"is_official"`
+	// Name is the name of the repository
+	Name string `json:"name"`
+	// IsAutomated indicates whether the result is automated
+	IsAutomated bool `json:"is_automated"`
+	// Description is a textual description of the repository
+	Description string `json:"description"`
+}
+
+// SearchResults lists a collection of search results returned from a registry
+type SearchResults struct {
+	// Query contains the query string that generated the search results
+	Query string `json:"query"`
+	// NumResults indicates the number of results the query returned
+	NumResults int `json:"num_results"`
+	// Results is a slice containing the actual results for the search
+	Results []SearchResult `json:"results"`
+}
+
+// DistributionInspect describes the result obtained from contacting the
+// registry to retrieve image metadata
+type DistributionInspect struct {
+	// Descriptor contains information about the manifest, including
+	// the content addressable digest
+	Descriptor v1.Descriptor
+	// Platforms contains the list of platforms supported by the image,
+	// obtained by parsing the manifest
+	Platforms []v1.Platform
+}
diff --git a/vendor/github.com/docker/docker/api/types/service_update_response.go b/vendor/github.com/docker/docker/api/types/service_update_response.go
new file mode 100644
index 0000000000..74ea64b1bb
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/service_update_response.go
@@ -0,0 +1,12 @@
+package types
+
+// This file was generated by the swagger tool.
+// Editing this file might prove futile when you re-run the swagger generate command
+
+// ServiceUpdateResponse service update response
+// swagger:model ServiceUpdateResponse
+type ServiceUpdateResponse struct {
+
+	// Optional warning messages
+	Warnings []string `json:"Warnings"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/stats.go b/vendor/github.com/docker/docker/api/types/stats.go
new file mode 100644
index 0000000000..20daebed14
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/stats.go
@@ -0,0 +1,181 @@
+// Package types is used for API stability in the types and response to the
+// consumers of the API stats endpoint.
+package types // import "github.com/docker/docker/api/types"
+
+import "time"
+
+// ThrottlingData stores CPU throttling stats of one running container.
+// Not used on Windows.
+type ThrottlingData struct {
+	// Number of periods with throttling active
+	Periods uint64 `json:"periods"`
+	// Number of periods when the container hits its throttling limit.
+	ThrottledPeriods uint64 `json:"throttled_periods"`
+	// Aggregate time the container was throttled for in nanoseconds.
+	ThrottledTime uint64 `json:"throttled_time"`
+}
+
+// CPUUsage stores all CPU stats aggregated since container inception.
+type CPUUsage struct {
+	// Total CPU time consumed.
+	// Units: nanoseconds (Linux)
+	// Units: 100's of nanoseconds (Windows)
+	TotalUsage uint64 `json:"total_usage"`
+
+	// Total CPU time consumed per core (Linux). Not used on Windows.
+	// Units: nanoseconds.
+	PercpuUsage []uint64 `json:"percpu_usage,omitempty"`
+
+	// Time spent by tasks of the cgroup in kernel mode (Linux).
+	// Time spent by all container processes in kernel mode (Windows).
+	// Units: nanoseconds (Linux).
+	// Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers.
+	UsageInKernelmode uint64 `json:"usage_in_kernelmode"`
+
+	// Time spent by tasks of the cgroup in user mode (Linux).
+ // Time spent by all container processes in user mode (Windows). + // Units: nanoseconds (Linux). + // Units: 100's of nanoseconds (Windows). Not populated for Hyper-V Containers + UsageInUsermode uint64 `json:"usage_in_usermode"` +} + +// CPUStats aggregates and wraps all CPU related info of container +type CPUStats struct { + // CPU Usage. Linux and Windows. + CPUUsage CPUUsage `json:"cpu_usage"` + + // System Usage. Linux only. + SystemUsage uint64 `json:"system_cpu_usage,omitempty"` + + // Online CPUs. Linux only. + OnlineCPUs uint32 `json:"online_cpus,omitempty"` + + // Throttling Data. Linux only. + ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` +} + +// MemoryStats aggregates all memory stats since container inception on Linux. +// Windows returns stats for commit and private working set only. +type MemoryStats struct { + // Linux Memory Stats + + // current res_counter usage for memory + Usage uint64 `json:"usage,omitempty"` + // maximum usage ever recorded. + MaxUsage uint64 `json:"max_usage,omitempty"` + // TODO(vishh): Export these as stronger types. + // all the stats exported via memory.stat. + Stats map[string]uint64 `json:"stats,omitempty"` + // number of times memory usage hits limits. + Failcnt uint64 `json:"failcnt,omitempty"` + Limit uint64 `json:"limit,omitempty"` + + // Windows Memory Stats + // See https://technet.microsoft.com/en-us/magazine/ff382715.aspx + + // committed bytes + Commit uint64 `json:"commitbytes,omitempty"` + // peak committed bytes + CommitPeak uint64 `json:"commitpeakbytes,omitempty"` + // private working set + PrivateWorkingSet uint64 `json:"privateworkingset,omitempty"` +} + +// BlkioStatEntry is one small entity to store a piece of Blkio stats +// Not used on Windows. +type BlkioStatEntry struct { + Major uint64 `json:"major"` + Minor uint64 `json:"minor"` + Op string `json:"op"` + Value uint64 `json:"value"` +} + +// BlkioStats stores All IO service stats for data read and write. +// This is a Linux specific structure as the differences between expressing +// block I/O on Windows and Linux are sufficiently significant to make +// little sense attempting to morph into a combined structure. +type BlkioStats struct { + // number of bytes transferred to and from the block device + IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` + IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` + IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` + IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` + IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` + IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` + IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` + SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` +} + +// StorageStats is the disk I/O stats for read/write on Windows. +type StorageStats struct { + ReadCountNormalized uint64 `json:"read_count_normalized,omitempty"` + ReadSizeBytes uint64 `json:"read_size_bytes,omitempty"` + WriteCountNormalized uint64 `json:"write_count_normalized,omitempty"` + WriteSizeBytes uint64 `json:"write_size_bytes,omitempty"` +} + +// NetworkStats aggregates the network stats of one container +type NetworkStats struct { + // Bytes received. Windows and Linux. + RxBytes uint64 `json:"rx_bytes"` + // Packets received. Windows and Linux. + RxPackets uint64 `json:"rx_packets"` + // Received errors. Not used on Windows. 
Note that we don't `omitempty` this
+	// field as it is expected in the >=v1.21 API stats structure.
+	RxErrors uint64 `json:"rx_errors"`
+	// Incoming packets dropped. Windows and Linux.
+	RxDropped uint64 `json:"rx_dropped"`
+	// Bytes sent. Windows and Linux.
+	TxBytes uint64 `json:"tx_bytes"`
+	// Packets sent. Windows and Linux.
+	TxPackets uint64 `json:"tx_packets"`
+	// Sent errors. Not used on Windows. Note that we don't `omitempty` this
+	// field as it is expected in the >=v1.21 API stats structure.
+	TxErrors uint64 `json:"tx_errors"`
+	// Outgoing packets dropped. Windows and Linux.
+	TxDropped uint64 `json:"tx_dropped"`
+	// Endpoint ID. Not used on Linux.
+	EndpointID string `json:"endpoint_id,omitempty"`
+	// Instance ID. Not used on Linux.
+	InstanceID string `json:"instance_id,omitempty"`
+}
+
+// PidsStats contains the stats of a container's pids
+type PidsStats struct {
+	// Current is the number of pids in the cgroup
+	Current uint64 `json:"current,omitempty"`
+	// Limit is the hard limit on the number of pids in the cgroup.
+	// A "Limit" of 0 means that there is no limit.
+	Limit uint64 `json:"limit,omitempty"`
+}
+
+// Stats is the ultimate struct aggregating all types of stats of one container
+type Stats struct {
+	// Common stats
+	Read    time.Time `json:"read"`
+	PreRead time.Time `json:"preread"`
+
+	// Linux specific stats, not populated on Windows.
+	PidsStats  PidsStats  `json:"pids_stats,omitempty"`
+	BlkioStats BlkioStats `json:"blkio_stats,omitempty"`
+
+	// Windows specific stats, not populated on Linux.
+	NumProcs     uint32       `json:"num_procs"`
+	StorageStats StorageStats `json:"storage_stats,omitempty"`
+
+	// Shared stats
+	CPUStats    CPUStats    `json:"cpu_stats,omitempty"`
+	PreCPUStats CPUStats    `json:"precpu_stats,omitempty"` // "Pre"="Previous"
+	MemoryStats MemoryStats `json:"memory_stats,omitempty"`
+}
+
+// StatsJSON extends Stats with the container's name and ID, and with
+// per-network stats.
+type StatsJSON struct {
+	Stats
+
+	Name string `json:"name,omitempty"`
+	ID   string `json:"id,omitempty"`
+
+	// Networks request version >=1.21
+	Networks map[string]NetworkStats `json:"networks,omitempty"`
+}
diff --git a/vendor/github.com/docker/docker/api/types/strslice/strslice.go b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
new file mode 100644
index 0000000000..82921cebc1
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice // import "github.com/docker/docker/api/types/strslice"
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// We need to override the json decoder to accept both options.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+	if len(b) == 0 {
+		// With no input, we preserve the existing value by returning nil and
+		// leaving the target alone. This allows defining default values for
+		// the type.
+ return nil + } + + p := make([]string, 0, 1) + if err := json.Unmarshal(b, &p); err != nil { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + p = append(p, s) + } + + *e = p + return nil +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/common.go b/vendor/github.com/docker/docker/api/types/swarm/common.go new file mode 100644 index 0000000000..ef020f458b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/common.go @@ -0,0 +1,40 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Version represents the internal object version. +type Version struct { + Index uint64 `json:",omitempty"` +} + +// Meta is a base object inherited by most of the other ones. +type Meta struct { + Version Version `json:",omitempty"` + CreatedAt time.Time `json:",omitempty"` + UpdatedAt time.Time `json:",omitempty"` +} + +// Annotations represents how to describe an object. +type Annotations struct { + Name string `json:",omitempty"` + Labels map[string]string `json:"Labels"` +} + +// Driver represents a driver (network, logging, secrets backend). +type Driver struct { + Name string `json:",omitempty"` + Options map[string]string `json:",omitempty"` +} + +// TLSInfo represents the TLS information about what CA certificate is trusted, +// and who the issuer for a TLS certificate is +type TLSInfo struct { + // TrustRoot is the trusted CA root certificate in PEM format + TrustRoot string `json:",omitempty"` + + // CertIssuerSubject is the raw subject bytes of the issuer + CertIssuerSubject []byte `json:",omitempty"` + + // CertIssuerPublicKey is the raw public key bytes of the issuer + CertIssuerPublicKey []byte `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/config.go b/vendor/github.com/docker/docker/api/types/swarm/config.go new file mode 100644 index 0000000000..16202ccce6 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/config.go @@ -0,0 +1,40 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Config represents a config. +type Config struct { + ID string + Meta + Spec ConfigSpec +} + +// ConfigSpec represents a config specification from a config in swarm +type ConfigSpec struct { + Annotations + Data []byte `json:",omitempty"` + + // Templating controls whether and how to evaluate the config payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// ConfigReferenceFileTarget is a file target in a config reference +type ConfigReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// ConfigReferenceRuntimeTarget is a target for a config specifying that it +// isn't mounted into the container but instead has some other purpose.
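Looping back to the StrSlice decoder completed just above: both JSON shapes land in the same Go value. A short usage sketch (imports of encoding/json and the vendored strslice package assumed; the inputs are made up):

var cmd strslice.StrSlice
// The array form decodes directly.
_ = json.Unmarshal([]byte(`["sleep", "1"]`), &cmd) // cmd == StrSlice{"sleep", "1"}
// The scalar form fails the []string decode, so the fallback wraps it.
_ = json.Unmarshal([]byte(`"sleep 1"`), &cmd) // cmd == StrSlice{"sleep 1"}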
+type ConfigReferenceRuntimeTarget struct{} + +// ConfigReference is a reference to a config in swarm +type ConfigReference struct { + File *ConfigReferenceFileTarget `json:",omitempty"` + Runtime *ConfigReferenceRuntimeTarget `json:",omitempty"` + ConfigID string + ConfigName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/container.go b/vendor/github.com/docker/docker/api/types/swarm/container.go new file mode 100644 index 0000000000..af5e1c0bc2 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/container.go @@ -0,0 +1,80 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/mount" + "github.com/docker/go-units" +) + +// DNSConfig specifies DNS related configurations in resolver configuration file (resolv.conf) +// Detailed documentation is available in: +// http://man7.org/linux/man-pages/man5/resolv.conf.5.html +// `nameserver`, `search`, `options` have been supported. +// TODO: `domain` is not supported yet. +type DNSConfig struct { + // Nameservers specifies the IP addresses of the name servers + Nameservers []string `json:",omitempty"` + // Search specifies the search list for host-name lookup + Search []string `json:",omitempty"` + // Options allows certain internal resolver variables to be modified + Options []string `json:",omitempty"` +} + +// SELinuxContext contains the SELinux labels of the container. +type SELinuxContext struct { + Disable bool + + User string + Role string + Type string + Level string +} + +// CredentialSpec for managed service account (Windows only) +type CredentialSpec struct { + Config string + File string + Registry string +} + +// Privileges defines the security options for the container. +type Privileges struct { + CredentialSpec *CredentialSpec + SELinuxContext *SELinuxContext +} + +// ContainerSpec represents the spec of a container. +type ContainerSpec struct { + Image string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Command []string `json:",omitempty"` + Args []string `json:",omitempty"` + Hostname string `json:",omitempty"` + Env []string `json:",omitempty"` + Dir string `json:",omitempty"` + User string `json:",omitempty"` + Groups []string `json:",omitempty"` + Privileges *Privileges `json:",omitempty"` + Init *bool `json:",omitempty"` + StopSignal string `json:",omitempty"` + TTY bool `json:",omitempty"` + OpenStdin bool `json:",omitempty"` + ReadOnly bool `json:",omitempty"` + Mounts []mount.Mount `json:",omitempty"` + StopGracePeriod *time.Duration `json:",omitempty"` + Healthcheck *container.HealthConfig `json:",omitempty"` + // The format of extra hosts on swarmkit is specified in: + // http://man7.org/linux/man-pages/man5/hosts.5.html + // IP_address canonical_hostname [aliases...] 
+ Hosts []string `json:",omitempty"` + DNSConfig *DNSConfig `json:",omitempty"` + Secrets []*SecretReference `json:",omitempty"` + Configs []*ConfigReference `json:",omitempty"` + Isolation container.Isolation `json:",omitempty"` + Sysctls map[string]string `json:",omitempty"` + CapabilityAdd []string `json:",omitempty"` + CapabilityDrop []string `json:",omitempty"` + Ulimits []*units.Ulimit `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/network.go b/vendor/github.com/docker/docker/api/types/swarm/network.go new file mode 100644 index 0000000000..98ef3284d1 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/network.go @@ -0,0 +1,121 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "github.com/docker/docker/api/types/network" +) + +// Endpoint represents an endpoint. +type Endpoint struct { + Spec EndpointSpec `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` + VirtualIPs []EndpointVirtualIP `json:",omitempty"` +} + +// EndpointSpec represents the spec of an endpoint. +type EndpointSpec struct { + Mode ResolutionMode `json:",omitempty"` + Ports []PortConfig `json:",omitempty"` +} + +// ResolutionMode represents a resolution mode. +type ResolutionMode string + +const ( + // ResolutionModeVIP VIP + ResolutionModeVIP ResolutionMode = "vip" + // ResolutionModeDNSRR DNSRR + ResolutionModeDNSRR ResolutionMode = "dnsrr" +) + +// PortConfig represents the config of a port. +type PortConfig struct { + Name string `json:",omitempty"` + Protocol PortConfigProtocol `json:",omitempty"` + // TargetPort is the port inside the container + TargetPort uint32 `json:",omitempty"` + // PublishedPort is the port on the swarm hosts + PublishedPort uint32 `json:",omitempty"` + // PublishMode is the mode in which port is published + PublishMode PortConfigPublishMode `json:",omitempty"` +} + +// PortConfigPublishMode represents the mode in which the port is to +// be published. +type PortConfigPublishMode string + +const ( + // PortConfigPublishModeIngress is used for ports published + // for ingress load balancing using routing mesh. + PortConfigPublishModeIngress PortConfigPublishMode = "ingress" + // PortConfigPublishModeHost is used for ports published + // for direct host level access on the host where the task is running. + PortConfigPublishModeHost PortConfigPublishMode = "host" +) + +// PortConfigProtocol represents the protocol of a port. +type PortConfigProtocol string + +const ( + // TODO(stevvooe): These should be used generally, not just for PortConfig. + + // PortConfigProtocolTCP TCP + PortConfigProtocolTCP PortConfigProtocol = "tcp" + // PortConfigProtocolUDP UDP + PortConfigProtocolUDP PortConfigProtocol = "udp" + // PortConfigProtocolSCTP SCTP + PortConfigProtocolSCTP PortConfigProtocol = "sctp" +) + +// EndpointVirtualIP represents the virtual ip of a port. +type EndpointVirtualIP struct { + NetworkID string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Network represents a network. +type Network struct { + ID string + Meta + Spec NetworkSpec `json:",omitempty"` + DriverState Driver `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` +} + +// NetworkSpec represents the spec of a network. 
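To make the TargetPort/PublishedPort/PublishMode trio above concrete, here is a hypothetical endpoint spec that publishes container port 80 on swarm-host port 8080 through the routing mesh (a sketch only, assuming the package is imported as swarm; names and ports are illustrative):

spec := swarm.EndpointSpec{
	Mode: swarm.ResolutionModeVIP,
	Ports: []swarm.PortConfig{{
		Name:          "web",
		Protocol:      swarm.PortConfigProtocolTCP,
		TargetPort:    80,   // port inside the container
		PublishedPort: 8080, // port on the swarm hosts
		PublishMode:   swarm.PortConfigPublishModeIngress,
	}},
}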
+type NetworkSpec struct { + Annotations + DriverConfiguration *Driver `json:",omitempty"` + IPv6Enabled bool `json:",omitempty"` + Internal bool `json:",omitempty"` + Attachable bool `json:",omitempty"` + Ingress bool `json:",omitempty"` + IPAMOptions *IPAMOptions `json:",omitempty"` + ConfigFrom *network.ConfigReference `json:",omitempty"` + Scope string `json:",omitempty"` +} + +// NetworkAttachmentConfig represents the configuration of a network attachment. +type NetworkAttachmentConfig struct { + Target string `json:",omitempty"` + Aliases []string `json:",omitempty"` + DriverOpts map[string]string `json:",omitempty"` +} + +// NetworkAttachment represents a network attachment. +type NetworkAttachment struct { + Network Network `json:",omitempty"` + Addresses []string `json:",omitempty"` +} + +// IPAMOptions represents ipam options. +type IPAMOptions struct { + Driver Driver `json:",omitempty"` + Configs []IPAMConfig `json:",omitempty"` +} + +// IPAMConfig represents ipam configuration. +type IPAMConfig struct { + Subnet string `json:",omitempty"` + Range string `json:",omitempty"` + Gateway string `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/node.go b/vendor/github.com/docker/docker/api/types/swarm/node.go new file mode 100644 index 0000000000..1e30f5fa10 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/node.go @@ -0,0 +1,115 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// Node represents a node. +type Node struct { + ID string + Meta + // Spec defines the desired state of the node as specified by the user. + // The system will honor this and will *never* modify it. + Spec NodeSpec `json:",omitempty"` + // Description encapsulates the properties of the Node as reported by the + // agent. + Description NodeDescription `json:",omitempty"` + // Status provides the current status of the node, as seen by the manager. + Status NodeStatus `json:",omitempty"` + // ManagerStatus provides the current status of the node's manager + // component, if the node is a manager. + ManagerStatus *ManagerStatus `json:",omitempty"` +} + +// NodeSpec represents the spec of a node. +type NodeSpec struct { + Annotations + Role NodeRole `json:",omitempty"` + Availability NodeAvailability `json:",omitempty"` +} + +// NodeRole represents the role of a node. +type NodeRole string + +const ( + // NodeRoleWorker WORKER + NodeRoleWorker NodeRole = "worker" + // NodeRoleManager MANAGER + NodeRoleManager NodeRole = "manager" +) + +// NodeAvailability represents the availability of a node. +type NodeAvailability string + +const ( + // NodeAvailabilityActive ACTIVE + NodeAvailabilityActive NodeAvailability = "active" + // NodeAvailabilityPause PAUSE + NodeAvailabilityPause NodeAvailability = "pause" + // NodeAvailabilityDrain DRAIN + NodeAvailabilityDrain NodeAvailability = "drain" +) + +// NodeDescription represents the description of a node. +type NodeDescription struct { + Hostname string `json:",omitempty"` + Platform Platform `json:",omitempty"` + Resources Resources `json:",omitempty"` + Engine EngineDescription `json:",omitempty"` + TLSInfo TLSInfo `json:",omitempty"` +} + +// Platform represents the platform (Arch/OS). +type Platform struct { + Architecture string `json:",omitempty"` + OS string `json:",omitempty"` +} + +// EngineDescription represents the description of an engine. 
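The Role/Availability pair defined above is what a client mutates to drain a node. A hedged sketch of the spec such an update would carry (the node name is hypothetical):

drained := swarm.NodeSpec{
	Annotations:  swarm.Annotations{Name: "worker-1"}, // hypothetical node name
	Role:         swarm.NodeRoleWorker,
	Availability: swarm.NodeAvailabilityDrain, // evict running tasks; schedule nothing new
}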
+type EngineDescription struct { + EngineVersion string `json:",omitempty"` + Labels map[string]string `json:",omitempty"` + Plugins []PluginDescription `json:",omitempty"` +} + +// PluginDescription represents the description of an engine plugin. +type PluginDescription struct { + Type string `json:",omitempty"` + Name string `json:",omitempty"` +} + +// NodeStatus represents the status of a node. +type NodeStatus struct { + State NodeState `json:",omitempty"` + Message string `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// Reachability represents the reachability of a node. +type Reachability string + +const ( + // ReachabilityUnknown UNKNOWN + ReachabilityUnknown Reachability = "unknown" + // ReachabilityUnreachable UNREACHABLE + ReachabilityUnreachable Reachability = "unreachable" + // ReachabilityReachable REACHABLE + ReachabilityReachable Reachability = "reachable" +) + +// ManagerStatus represents the status of a manager. +type ManagerStatus struct { + Leader bool `json:",omitempty"` + Reachability Reachability `json:",omitempty"` + Addr string `json:",omitempty"` +} + +// NodeState represents the state of a node. +type NodeState string + +const ( + // NodeStateUnknown UNKNOWN + NodeStateUnknown NodeState = "unknown" + // NodeStateDown DOWN + NodeStateDown NodeState = "down" + // NodeStateReady READY + NodeStateReady NodeState = "ready" + // NodeStateDisconnected DISCONNECTED + NodeStateDisconnected NodeState = "disconnected" +) diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime.go b/vendor/github.com/docker/docker/api/types/swarm/runtime.go new file mode 100644 index 0000000000..0c77403ccf --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime.go @@ -0,0 +1,27 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +// RuntimeType is the type of runtime used for the TaskSpec +type RuntimeType string + +// RuntimeURL is the proto type url +type RuntimeURL string + +const ( + // RuntimeContainer is the container based runtime + RuntimeContainer RuntimeType = "container" + // RuntimePlugin is the plugin based runtime + RuntimePlugin RuntimeType = "plugin" + // RuntimeNetworkAttachment is the network attachment runtime + RuntimeNetworkAttachment RuntimeType = "attachment" + + // RuntimeURLContainer is the proto url for the container type + RuntimeURLContainer RuntimeURL = "types.docker.com/RuntimeContainer" + // RuntimeURLPlugin is the proto url for the plugin type + RuntimeURLPlugin RuntimeURL = "types.docker.com/RuntimePlugin" +) + +// NetworkAttachmentSpec represents the runtime spec type for network +// attachment tasks +type NetworkAttachmentSpec struct { + ContainerID string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go new file mode 100644 index 0000000000..98c2806c31 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/gen.go @@ -0,0 +1,3 @@ +//go:generate protoc -I . --gogofast_out=import_path=github.com/docker/docker/api/types/swarm/runtime:. 
plugin.proto + +package runtime // import "github.com/docker/docker/api/types/swarm/runtime" diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go new file mode 100644 index 0000000000..e45045866a --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.pb.go @@ -0,0 +1,754 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: plugin.proto + +/* + Package runtime is a generated protocol buffer package. + + It is generated from these files: + plugin.proto + + It has these top-level messages: + PluginSpec + PluginPrivilege +*/ +package runtime + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +type PluginSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Remote string `protobuf:"bytes,2,opt,name=remote,proto3" json:"remote,omitempty"` + Privileges []*PluginPrivilege `protobuf:"bytes,3,rep,name=privileges" json:"privileges,omitempty"` + Disabled bool `protobuf:"varint,4,opt,name=disabled,proto3" json:"disabled,omitempty"` + Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` +} + +func (m *PluginSpec) Reset() { *m = PluginSpec{} } +func (m *PluginSpec) String() string { return proto.CompactTextString(m) } +func (*PluginSpec) ProtoMessage() {} +func (*PluginSpec) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *PluginSpec) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginSpec) GetRemote() string { + if m != nil { + return m.Remote + } + return "" +} + +func (m *PluginSpec) GetPrivileges() []*PluginPrivilege { + if m != nil { + return m.Privileges + } + return nil +} + +func (m *PluginSpec) GetDisabled() bool { + if m != nil { + return m.Disabled + } + return false +} + +func (m *PluginSpec) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. 
+type PluginPrivilege struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Description string `protobuf:"bytes,2,opt,name=description,proto3" json:"description,omitempty"` + Value []string `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"` +} + +func (m *PluginPrivilege) Reset() { *m = PluginPrivilege{} } +func (m *PluginPrivilege) String() string { return proto.CompactTextString(m) } +func (*PluginPrivilege) ProtoMessage() {} +func (*PluginPrivilege) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } + +func (m *PluginPrivilege) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *PluginPrivilege) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *PluginPrivilege) GetValue() []string { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*PluginSpec)(nil), "PluginSpec") + proto.RegisterType((*PluginPrivilege)(nil), "PluginPrivilege") +} +func (m *PluginSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginSpec) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Remote) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Remote))) + i += copy(dAtA[i:], m.Remote) + } + if len(m.Privileges) > 0 { + for _, msg := range m.Privileges { + dAtA[i] = 0x1a + i++ + i = encodeVarintPlugin(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Disabled { + dAtA[i] = 0x20 + i++ + if m.Disabled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Env) > 0 { + for _, s := range m.Env { + dAtA[i] = 0x2a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *PluginPrivilege) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PluginPrivilege) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Description) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintPlugin(dAtA, i, uint64(len(m.Description))) + i += copy(dAtA[i:], m.Description) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintPlugin(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *PluginSpec) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Remote) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Privileges) > 0 { + for _, e := range m.Privileges { + l = e.Size() + n += 1 + l + 
sovPlugin(uint64(l)) + } + } + if m.Disabled { + n += 2 + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func (m *PluginPrivilege) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + l = len(m.Description) + if l > 0 { + n += 1 + l + sovPlugin(uint64(l)) + } + if len(m.Value) > 0 { + for _, s := range m.Value { + l = len(s) + n += 1 + l + sovPlugin(uint64(l)) + } + } + return n +} + +func sovPlugin(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPlugin(x uint64) (n int) { + return sovPlugin(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *PluginSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Remote", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Remote = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Privileges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Privileges = append(m.Privileges, &PluginPrivilege{}) + if err := m.Privileges[len(m.Privileges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Disabled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Disabled = bool(v != 0) + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PluginPrivilege) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PluginPrivilege: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PluginPrivilege: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Description", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Description = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPlugin + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthPlugin + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPlugin(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPlugin + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPlugin(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPlugin + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPlugin + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPlugin(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPlugin = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPlugin = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } + +var fileDescriptorPlugin = []byte{ + // 256 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x90, 0x4d, 0x4b, 0xc3, 0x30, + 0x18, 0xc7, 0x89, 0xdd, 0xc6, 0xfa, 0x4c, 0x70, 0x04, 0x91, 0xe2, 0xa1, 0x94, 0x9d, 0x7a, 0x6a, + 0x45, 0x2f, 0x82, 0x37, 0x0f, 0x9e, 0x47, 0xbc, 0x09, 0x1e, 0xd2, 0xf6, 0xa1, 0x06, 0x9b, 0x17, + 0x92, 0xb4, 0xe2, 0x37, 0xf1, 0x23, 0x79, 0xf4, 0x23, 0x48, 0x3f, 0x89, 0x98, 0x75, 0x32, 0x64, + 0xa7, 0xff, 0x4b, 0xc2, 0x9f, 0x1f, 0x0f, 0x9c, 0x9a, 0xae, 0x6f, 0x85, 0x2a, 0x8c, 0xd5, 0x5e, + 0x6f, 0x3e, 0x08, 0xc0, 0x36, 0x14, 0x8f, 0x06, 0x6b, 0x4a, 0x61, 0xa6, 0xb8, 0xc4, 0x84, 0x64, + 0x24, 0x8f, 0x59, 0xf0, 0xf4, 0x02, 0x16, 0x16, 0xa5, 0xf6, 0x98, 0x9c, 0x84, 0x76, 0x4a, 0xf4, + 0x0a, 0xc0, 0x58, 0x31, 0x88, 0x0e, 0x5b, 0x74, 0x49, 0x94, 0x45, 0xf9, 0xea, 0x7a, 0x5d, 0xec, + 0xc6, 0xb6, 0xfb, 0x07, 0x76, 0xf0, 0x87, 0x5e, 0xc2, 0xb2, 0x11, 0x8e, 0x57, 0x1d, 0x36, 0xc9, + 0x2c, 0x23, 0xf9, 0x92, 0xfd, 0x65, 0xba, 0x86, 0x08, 0xd5, 0x90, 
0xcc, 0xb3, 0x28, 0x8f, 0xd9, + 0xaf, 0xdd, 0x3c, 0xc3, 0xd9, 0xbf, 0xb1, 0xa3, 0x78, 0x19, 0xac, 0x1a, 0x74, 0xb5, 0x15, 0xc6, + 0x0b, 0xad, 0x26, 0xc6, 0xc3, 0x8a, 0x9e, 0xc3, 0x7c, 0xe0, 0x5d, 0x8f, 0x81, 0x31, 0x66, 0xbb, + 0x70, 0xff, 0xf0, 0x39, 0xa6, 0xe4, 0x6b, 0x4c, 0xc9, 0xf7, 0x98, 0x92, 0xa7, 0xdb, 0x56, 0xf8, + 0x97, 0xbe, 0x2a, 0x6a, 0x2d, 0xcb, 0x46, 0xd7, 0xaf, 0x68, 0xf7, 0xc2, 0x8d, 0x28, 0xfd, 0xbb, + 0x41, 0x57, 0xba, 0x37, 0x6e, 0x65, 0x69, 0x7b, 0xe5, 0x85, 0xc4, 0xbb, 0x49, 0xab, 0x45, 0x38, + 0xe4, 0xcd, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x99, 0xa8, 0xd9, 0x9b, 0x58, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto new file mode 100644 index 0000000000..9ef169046b --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/runtime/plugin.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +option go_package = "github.com/docker/docker/api/types/swarm/runtime;runtime"; + +// PluginSpec defines the base payload which clients can specify for creating +// a service with the plugin runtime. +message PluginSpec { + string name = 1; + string remote = 2; + repeated PluginPrivilege privileges = 3; + bool disabled = 4; + repeated string env = 5; +} + +// PluginPrivilege describes a permission the user has to accept +// upon installing a plugin. +message PluginPrivilege { + string name = 1; + string description = 2; + repeated string value = 3; +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/secret.go b/vendor/github.com/docker/docker/api/types/swarm/secret.go new file mode 100644 index 0000000000..d5213ec981 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/secret.go @@ -0,0 +1,36 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "os" + +// Secret represents a secret. +type Secret struct { + ID string + Meta + Spec SecretSpec +} + +// SecretSpec represents a secret specification from a secret in swarm +type SecretSpec struct { + Annotations + Data []byte `json:",omitempty"` + Driver *Driver `json:",omitempty"` // name of the secrets driver used to fetch the secret's value from an external secret store + + // Templating controls whether and how to evaluate the secret payload as + // a template. If it is not set, no templating is used. + Templating *Driver `json:",omitempty"` +} + +// SecretReferenceFileTarget is a file target in a secret reference +type SecretReferenceFileTarget struct { + Name string + UID string + GID string + Mode os.FileMode +} + +// SecretReference is a reference to a secret in swarm +type SecretReference struct { + File *SecretReferenceFileTarget + SecretID string + SecretName string +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/service.go b/vendor/github.com/docker/docker/api/types/swarm/service.go new file mode 100644 index 0000000000..6eb452d24d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/service.go @@ -0,0 +1,202 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import "time" + +// Service represents a service. 
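Since the hand-rolled Marshal/Unmarshal methods in the generated plugin code above are easy to misread, a round-trip sketch may help (values are made up; assumes the generated runtime package is imported):

spec := runtime.PluginSpec{
	Name:   "sample/plugin",             // hypothetical
	Remote: "example.com/sample/plugin", // hypothetical
	Privileges: []*runtime.PluginPrivilege{{
		Name:        "network",
		Description: "host networking access",
		Value:       []string{"host"},
	}},
}
raw, err := spec.Marshal() // wire-format protobuf encoding
if err != nil {
	panic(err)
}
var decoded runtime.PluginSpec
if err := decoded.Unmarshal(raw); err != nil {
	panic(err)
}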
+type Service struct { + ID string + Meta + Spec ServiceSpec `json:",omitempty"` + PreviousSpec *ServiceSpec `json:",omitempty"` + Endpoint Endpoint `json:",omitempty"` + UpdateStatus *UpdateStatus `json:",omitempty"` + + // ServiceStatus is an optional, extra field indicating the number of + // desired and running tasks. It is provided primarily as a shortcut to + // calculating these values client-side, which otherwise would require + // listing all tasks for a service, an operation that could be + // computationally and network expensive. + ServiceStatus *ServiceStatus `json:",omitempty"` + + // JobStatus is the status of a Service which is in one of ReplicatedJob or + // GlobalJob modes. It is absent on Replicated and Global services. + JobStatus *JobStatus `json:",omitempty"` +} + +// ServiceSpec represents the spec of a service. +type ServiceSpec struct { + Annotations + + // TaskTemplate defines how the service should construct new tasks when + // orchestrating this service. + TaskTemplate TaskSpec `json:",omitempty"` + Mode ServiceMode `json:",omitempty"` + UpdateConfig *UpdateConfig `json:",omitempty"` + RollbackConfig *UpdateConfig `json:",omitempty"` + + // Networks field in ServiceSpec is deprecated. The + // same field in TaskSpec should be used instead. + // This field will be removed in a future release. + Networks []NetworkAttachmentConfig `json:",omitempty"` + EndpointSpec *EndpointSpec `json:",omitempty"` +} + +// ServiceMode represents the mode of a service. +type ServiceMode struct { + Replicated *ReplicatedService `json:",omitempty"` + Global *GlobalService `json:",omitempty"` + ReplicatedJob *ReplicatedJob `json:",omitempty"` + GlobalJob *GlobalJob `json:",omitempty"` +} + +// UpdateState is the state of a service update. +type UpdateState string + +const ( + // UpdateStateUpdating is the updating state. + UpdateStateUpdating UpdateState = "updating" + // UpdateStatePaused is the paused state. + UpdateStatePaused UpdateState = "paused" + // UpdateStateCompleted is the completed state. + UpdateStateCompleted UpdateState = "completed" + // UpdateStateRollbackStarted is the state with a rollback in progress. + UpdateStateRollbackStarted UpdateState = "rollback_started" + // UpdateStateRollbackPaused is the state with a paused rollback. + UpdateStateRollbackPaused UpdateState = "rollback_paused" + // UpdateStateRollbackCompleted is the state with a completed rollback. + UpdateStateRollbackCompleted UpdateState = "rollback_completed" +) + +// UpdateStatus reports the status of a service update. +type UpdateStatus struct { + State UpdateState `json:",omitempty"` + StartedAt *time.Time `json:",omitempty"` + CompletedAt *time.Time `json:",omitempty"` + Message string `json:",omitempty"` +} + +// ReplicatedService is a kind of ServiceMode. +type ReplicatedService struct { + Replicas *uint64 `json:",omitempty"` +} + +// GlobalService is a kind of ServiceMode. +type GlobalService struct{} + +// ReplicatedJob is a type of Service that executes a defined number of Tasks +// in parallel until the specified number of Tasks have succeeded. +type ReplicatedJob struct { + // MaxConcurrent indicates the maximum number of Tasks that should be + // executing simultaneously for this job at any given time. There may be + // fewer than MaxConcurrent Tasks executing simultaneously; for example, if + // there are fewer than MaxConcurrent tasks needed to reach + // TotalCompletions. + // + // If this field is empty, it will default to a max concurrency of 1.
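Exactly one of the four pointers in ServiceMode above is expected to be non-nil; for instance, a classic three-replica service looks like this (a sketch, assuming the package is imported as swarm):

replicas := uint64(3)
mode := swarm.ServiceMode{
	Replicated: &swarm.ReplicatedService{Replicas: &replicas},
}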
+ MaxConcurrent *uint64 `json:",omitempty"` + + // TotalCompletions is the total number of Tasks desired to run to + // completion. + // + // If this field is empty, the value of MaxConcurrent will be used. + TotalCompletions *uint64 `json:",omitempty"` +} + +// GlobalJob is the type of a Service which executes a Task on every Node +// matching the Service's placement constraints. These tasks run to completion +// and then exit. +// +// This type is deliberately empty. +type GlobalJob struct{} + +const ( + // UpdateFailureActionPause PAUSE + UpdateFailureActionPause = "pause" + // UpdateFailureActionContinue CONTINUE + UpdateFailureActionContinue = "continue" + // UpdateFailureActionRollback ROLLBACK + UpdateFailureActionRollback = "rollback" + + // UpdateOrderStopFirst STOP_FIRST + UpdateOrderStopFirst = "stop-first" + // UpdateOrderStartFirst START_FIRST + UpdateOrderStartFirst = "start-first" +) + +// UpdateConfig represents the update configuration. +type UpdateConfig struct { + // Maximum number of tasks to be updated in one iteration. + // 0 means unlimited parallelism. + Parallelism uint64 + + // Amount of time between updates. + Delay time.Duration `json:",omitempty"` + + // FailureAction is the action to take when an update fails. + FailureAction string `json:",omitempty"` + + // Monitor indicates how long to monitor a task for failure after it is + // created. If the task fails by ending up in one of the states + // REJECTED, COMPLETED, or FAILED, within Monitor from its creation, + // this counts as a failure. If it fails after Monitor, it does not + // count as a failure. If Monitor is unspecified, a default value will + // be used. + Monitor time.Duration `json:",omitempty"` + + // MaxFailureRatio is the fraction of tasks that may fail during + // an update before the failure action is invoked. Any task created by + // the current update which ends up in one of the states REJECTED, + // COMPLETED or FAILED within Monitor from its creation counts as a + // failure. The number of failures is divided by the number of tasks + // being updated, and if this fraction is greater than + // MaxFailureRatio, the failure action is invoked. + // + // If the failure action is CONTINUE, there is no effect. + // If the failure action is PAUSE, no more tasks will be updated until + // another update is started. + MaxFailureRatio float32 + + // Order indicates the order of operations when rolling out an updated + // task. Either the old task is shut down before the new task is + // started, or the new task is started before the old task is shut down. + Order string +} + +// ServiceStatus represents the number of running tasks in a service and the +// number of tasks desired to be running. +type ServiceStatus struct { + // RunningTasks is the number of tasks for the service actually in the + // Running state + RunningTasks uint64 + + // DesiredTasks is the number of tasks desired to be running by the + // service. For replicated services, this is the replica count. For global + // services, this is computed by taking the number of tasks with desired + // state of not-Shutdown. + DesiredTasks uint64 + + // CompletedTasks is the number of tasks in the state Completed, if this + // service is in ReplicatedJob or GlobalJob mode. This field must be + // cross-referenced with the service type, because the default value of 0 + // may mean that a service is not in a job mode, or it may mean that the + // job has yet to complete any tasks.
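A concrete reading of the UpdateConfig knobs above, as a hypothetical rolling-update policy (a sketch; assumes the package is imported as swarm and time is imported):

rolling := &swarm.UpdateConfig{
	Parallelism:     2,                // two tasks per batch; 0 would mean all at once
	Delay:           10 * time.Second, // pause between batches
	Monitor:         30 * time.Second, // failure-observation window per new task
	FailureAction:   swarm.UpdateFailureActionRollback,
	MaxFailureRatio: 0.2, // roll back once more than 20% of updated tasks fail
	Order:           swarm.UpdateOrderStartFirst,
}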
+ CompletedTasks uint64 +} + +// JobStatus is the status of a job-type service. +type JobStatus struct { + // JobIteration is a value increased each time a Job is executed, + // successfully or otherwise. "Executed", in this case, means the job as a + // whole has been started, not that an individual Task has been launched. A + // job is "Executed" when its ServiceSpec is updated. JobIteration can be + // used to disambiguate Tasks belonging to different executions of a job. + // + // Though JobIteration will increase with each subsequent execution, it may + // not necessarily increase by 1, and so JobIteration should not be used to + // keep track of the number of times a job has been executed. + JobIteration Version + + // LastExecution is the time that the job was last executed, as observed by + // the Swarm manager. + LastExecution time.Time `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/swarm.go b/vendor/github.com/docker/docker/api/types/swarm/swarm.go new file mode 100644 index 0000000000..b25f999646 --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/swarm.go @@ -0,0 +1,227 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" +) + +// ClusterInfo represents info about the cluster for outputting in "info". +// It contains the same information as "Swarm", but without the JoinTokens. +type ClusterInfo struct { + ID string + Meta + Spec Spec + TLSInfo TLSInfo + RootRotationInProgress bool + DefaultAddrPool []string + SubnetSize uint32 + DataPathPort uint32 +} + +// Swarm represents a swarm. +type Swarm struct { + ClusterInfo + JoinTokens JoinTokens +} + +// JoinTokens contains the tokens workers and managers need to join the swarm. +type JoinTokens struct { + // Worker is the join token workers may use to join the swarm. + Worker string + // Manager is the join token managers may use to join the swarm. + Manager string +} + +// Spec represents the spec of a swarm. +type Spec struct { + Annotations + + Orchestration OrchestrationConfig `json:",omitempty"` + Raft RaftConfig `json:",omitempty"` + Dispatcher DispatcherConfig `json:",omitempty"` + CAConfig CAConfig `json:",omitempty"` + TaskDefaults TaskDefaults `json:",omitempty"` + EncryptionConfig EncryptionConfig `json:",omitempty"` +} + +// OrchestrationConfig represents orchestration configuration. +type OrchestrationConfig struct { + // TaskHistoryRetentionLimit is the number of historic tasks to keep per instance or + // node. If negative, never remove completed or failed tasks. + TaskHistoryRetentionLimit *int64 `json:",omitempty"` +} + +// TaskDefaults parameterizes cluster-level task creation with default values. +type TaskDefaults struct { + // LogDriver selects the log driver to use for tasks created in the + // orchestrator if unspecified by a service. + // + // Updating this value will only have an effect on new tasks. Old tasks + // will continue to use their previously configured log driver until + // recreated. + LogDriver *Driver `json:",omitempty"` +} + +// EncryptionConfig controls at-rest encryption of data and keys. +type EncryptionConfig struct { + // AutoLockManagers specifies whether or not managers' TLS keys and raft data + // should be encrypted at rest in such a way that they must be unlocked + // before the manager node starts up again. + AutoLockManagers bool +} + +// RaftConfig represents raft configuration.
+type RaftConfig struct { + // SnapshotInterval is the number of log entries between snapshots. + SnapshotInterval uint64 `json:",omitempty"` + + // KeepOldSnapshots is the number of snapshots to keep beyond the + // current snapshot. + KeepOldSnapshots *uint64 `json:",omitempty"` + + // LogEntriesForSlowFollowers is the number of log entries to keep + // around to sync up slow followers after a snapshot is created. + LogEntriesForSlowFollowers uint64 `json:",omitempty"` + + // ElectionTick is the number of ticks that a follower will wait for a message + // from the leader before becoming a candidate and starting an election. + // ElectionTick must be greater than HeartbeatTick. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + ElectionTick int + + // HeartbeatTick is the number of ticks between heartbeats. Every + // HeartbeatTick ticks, the leader will send a heartbeat to the + // followers. + // + // A tick currently defaults to one second, so these translate directly to + // seconds currently, but this is NOT guaranteed. + HeartbeatTick int +} + +// DispatcherConfig represents dispatcher configuration. +type DispatcherConfig struct { + // HeartbeatPeriod defines how often an agent should send heartbeats to + // the dispatcher. + HeartbeatPeriod time.Duration `json:",omitempty"` +} + +// CAConfig represents CA configuration. +type CAConfig struct { + // NodeCertExpiry is the duration certificates should be issued for + NodeCertExpiry time.Duration `json:",omitempty"` + + // ExternalCAs is a list of CAs to which a manager node will make + // certificate signing requests for node certificates. + ExternalCAs []*ExternalCA `json:",omitempty"` + + // SigningCACert and SigningCAKey specify the desired signing root CA and + // root CA key for the swarm. When inspecting the cluster, the key will + // be redacted. + SigningCACert string `json:",omitempty"` + SigningCAKey string `json:",omitempty"` + + // If this value changes, and there is no specified signing cert and key, + // then the swarm is forced to generate a new root certificate and key. + ForceRotate uint64 `json:",omitempty"` +} + +// ExternalCAProtocol represents the type of an external CA. +type ExternalCAProtocol string + +// ExternalCAProtocolCFSSL CFSSL +const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" + +// ExternalCA defines an external CA to be used by the cluster. +type ExternalCA struct { + // Protocol is the protocol used by this external CA. + Protocol ExternalCAProtocol + + // URL is the URL where the external CA can be reached. + URL string + + // Options is a set of additional key/value pairs whose interpretation + // depends on the specified CA type. + Options map[string]string `json:",omitempty"` + + // CACert specifies which root CA is used by this external CA. This certificate must + // be in PEM format. + CACert string +} + +// InitRequest is the request used to init a swarm. +type InitRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + DataPathPort uint32 + ForceNewCluster bool + Spec Spec + AutoLockManagers bool + Availability NodeAvailability + DefaultAddrPool []string + SubnetSize uint32 +} + +// JoinRequest is the request used to join a swarm. +type JoinRequest struct { + ListenAddr string + AdvertiseAddr string + DataPathAddr string + RemoteAddrs []string + JoinToken string // accept by secret + Availability NodeAvailability +} + +// UnlockRequest is the request used to unlock a swarm.
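For orientation, the InitRequest above corresponds to the options of `docker swarm init`; a hedged example payload (addresses and address pool are made up):

req := swarm.InitRequest{
	ListenAddr:       "0.0.0.0:2377",
	AdvertiseAddr:    "192.0.2.10:2377", // hypothetical manager address
	AutoLockManagers: true,              // managers will need the unlock key after a restart
	DefaultAddrPool:  []string{"10.20.0.0/16"},
	SubnetSize:       24,
	Spec:             swarm.Spec{Annotations: swarm.Annotations{Name: "default"}},
}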
+type UnlockRequest struct { + // UnlockKey is the unlock key in ASCII-armored format. + UnlockKey string +} + +// LocalNodeState represents the state of the local node. +type LocalNodeState string + +const ( + // LocalNodeStateInactive INACTIVE + LocalNodeStateInactive LocalNodeState = "inactive" + // LocalNodeStatePending PENDING + LocalNodeStatePending LocalNodeState = "pending" + // LocalNodeStateActive ACTIVE + LocalNodeStateActive LocalNodeState = "active" + // LocalNodeStateError ERROR + LocalNodeStateError LocalNodeState = "error" + // LocalNodeStateLocked LOCKED + LocalNodeStateLocked LocalNodeState = "locked" +) + +// Info represents generic information about swarm. +type Info struct { + NodeID string + NodeAddr string + + LocalNodeState LocalNodeState + ControlAvailable bool + Error string + + RemoteManagers []Peer + Nodes int `json:",omitempty"` + Managers int `json:",omitempty"` + + Cluster *ClusterInfo `json:",omitempty"` + + Warnings []string `json:",omitempty"` +} + +// Peer represents a peer. +type Peer struct { + NodeID string + Addr string +} + +// UpdateFlags contains flags for SwarmUpdate. +type UpdateFlags struct { + RotateWorkerToken bool + RotateManagerToken bool + RotateManagerUnlockKey bool +} diff --git a/vendor/github.com/docker/docker/api/types/swarm/task.go b/vendor/github.com/docker/docker/api/types/swarm/task.go new file mode 100644 index 0000000000..a6f7ab7b5c --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/swarm/task.go @@ -0,0 +1,206 @@ +package swarm // import "github.com/docker/docker/api/types/swarm" + +import ( + "time" + + "github.com/docker/docker/api/types/swarm/runtime" +) + +// TaskState represents the state of a task. +type TaskState string + +const ( + // TaskStateNew NEW + TaskStateNew TaskState = "new" + // TaskStateAllocated ALLOCATED + TaskStateAllocated TaskState = "allocated" + // TaskStatePending PENDING + TaskStatePending TaskState = "pending" + // TaskStateAssigned ASSIGNED + TaskStateAssigned TaskState = "assigned" + // TaskStateAccepted ACCEPTED + TaskStateAccepted TaskState = "accepted" + // TaskStatePreparing PREPARING + TaskStatePreparing TaskState = "preparing" + // TaskStateReady READY + TaskStateReady TaskState = "ready" + // TaskStateStarting STARTING + TaskStateStarting TaskState = "starting" + // TaskStateRunning RUNNING + TaskStateRunning TaskState = "running" + // TaskStateComplete COMPLETE + TaskStateComplete TaskState = "complete" + // TaskStateShutdown SHUTDOWN + TaskStateShutdown TaskState = "shutdown" + // TaskStateFailed FAILED + TaskStateFailed TaskState = "failed" + // TaskStateRejected REJECTED + TaskStateRejected TaskState = "rejected" + // TaskStateRemove REMOVE + TaskStateRemove TaskState = "remove" + // TaskStateOrphaned ORPHANED + TaskStateOrphaned TaskState = "orphaned" +) + +// Task represents a task. +type Task struct { + ID string + Meta + Annotations + + Spec TaskSpec `json:",omitempty"` + ServiceID string `json:",omitempty"` + Slot int `json:",omitempty"` + NodeID string `json:",omitempty"` + Status TaskStatus `json:",omitempty"` + DesiredState TaskState `json:",omitempty"` + NetworksAttachments []NetworkAttachment `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` + + // JobIteration is the JobIteration of the Service that this Task was + // spawned from, if the Service is a ReplicatedJob or GlobalJob. This is + // used to determine which Tasks belong to which run of the job. 
This field + // is absent if the Service mode is Replicated or Global. + JobIteration *Version `json:",omitempty"` +} + +// TaskSpec represents the spec of a task. +type TaskSpec struct { + // ContainerSpec, NetworkAttachmentSpec, and PluginSpec are mutually exclusive. + // PluginSpec is only used when the `Runtime` field is set to `plugin` + // NetworkAttachmentSpec is used if the `Runtime` field is set to + // `attachment`. + ContainerSpec *ContainerSpec `json:",omitempty"` + PluginSpec *runtime.PluginSpec `json:",omitempty"` + NetworkAttachmentSpec *NetworkAttachmentSpec `json:",omitempty"` + + Resources *ResourceRequirements `json:",omitempty"` + RestartPolicy *RestartPolicy `json:",omitempty"` + Placement *Placement `json:",omitempty"` + Networks []NetworkAttachmentConfig `json:",omitempty"` + + // LogDriver specifies the LogDriver to use for tasks created from this + // spec. If not present, the one on cluster default on swarm.Spec will be + // used, finally falling back to the engine default if not specified. + LogDriver *Driver `json:",omitempty"` + + // ForceUpdate is a counter that triggers an update even if no relevant + // parameters have been changed. + ForceUpdate uint64 + + Runtime RuntimeType `json:",omitempty"` +} + +// Resources represents resources (CPU/Memory) which can be advertised by a +// node and requested to be reserved for a task. +type Resources struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + GenericResources []GenericResource `json:",omitempty"` +} + +// Limit describes limits on resources which can be requested by a task. +type Limit struct { + NanoCPUs int64 `json:",omitempty"` + MemoryBytes int64 `json:",omitempty"` + Pids int64 `json:",omitempty"` +} + +// GenericResource represents a "user defined" resource which can +// be either an integer (e.g: SSD=3) or a string (e.g: SSD=sda1) +type GenericResource struct { + NamedResourceSpec *NamedGenericResource `json:",omitempty"` + DiscreteResourceSpec *DiscreteGenericResource `json:",omitempty"` +} + +// NamedGenericResource represents a "user defined" resource which is defined +// as a string. +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to identify the resource (GPU="UUID-1", FPGA="/dev/sdb5", ...) +type NamedGenericResource struct { + Kind string `json:",omitempty"` + Value string `json:",omitempty"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:",omitempty"` + Value int64 `json:",omitempty"` +} + +// ResourceRequirements represents resources requirements. +type ResourceRequirements struct { + Limits *Limit `json:",omitempty"` + Reservations *Resources `json:",omitempty"` +} + +// Placement represents orchestration parameters. +type Placement struct { + Constraints []string `json:",omitempty"` + Preferences []PlacementPreference `json:",omitempty"` + MaxReplicas uint64 `json:",omitempty"` + + // Platforms stores all the platforms that the image can run on. + // This field is used in the platform filter for scheduling. If empty, + // then the platform filter is off, meaning there are no scheduling restrictions. 
+ Platforms []Platform `json:",omitempty"` +} + +// PlacementPreference provides a way to make the scheduler aware of factors +// such as topology. +type PlacementPreference struct { + Spread *SpreadOver +} + +// SpreadOver is a scheduling preference that instructs the scheduler to spread +// tasks evenly over groups of nodes identified by labels. +type SpreadOver struct { + // label descriptor, such as engine.labels.az + SpreadDescriptor string +} + +// RestartPolicy represents the restart policy. +type RestartPolicy struct { + Condition RestartPolicyCondition `json:",omitempty"` + Delay *time.Duration `json:",omitempty"` + MaxAttempts *uint64 `json:",omitempty"` + Window *time.Duration `json:",omitempty"` +} + +// RestartPolicyCondition represents when to restart. +type RestartPolicyCondition string + +const ( + // RestartPolicyConditionNone NONE + RestartPolicyConditionNone RestartPolicyCondition = "none" + // RestartPolicyConditionOnFailure ON_FAILURE + RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" + // RestartPolicyConditionAny ANY + RestartPolicyConditionAny RestartPolicyCondition = "any" +) + +// TaskStatus represents the status of a task. +type TaskStatus struct { + Timestamp time.Time `json:",omitempty"` + State TaskState `json:",omitempty"` + Message string `json:",omitempty"` + Err string `json:",omitempty"` + ContainerStatus *ContainerStatus `json:",omitempty"` + PortStatus PortStatus `json:",omitempty"` +} + +// ContainerStatus represents the status of a container. +type ContainerStatus struct { + ContainerID string + PID int + ExitCode int +} + +// PortStatus represents the port status of a task's host ports whose +// service has published host ports +type PortStatus struct { + Ports []PortConfig `json:",omitempty"` +} diff --git a/vendor/github.com/docker/docker/api/types/types.go b/vendor/github.com/docker/docker/api/types/types.go new file mode 100644 index 0000000000..e3a159912e --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/types.go @@ -0,0 +1,635 @@ +package types // import "github.com/docker/docker/api/types" + +import ( + "errors" + "fmt" + "io" + "os" + "strings" + "time" + + "github.com/docker/docker/api/types/container" + "github.com/docker/docker/api/types/filters" + "github.com/docker/docker/api/types/mount" + "github.com/docker/docker/api/types/network" + "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/api/types/swarm" + "github.com/docker/go-connections/nat" +) + +// RootFS returns Image's RootFS description including the layer IDs. 
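Pulling the resource types above together, a hedged sketch of a task reservation that includes one discrete generic resource (all values illustrative; assumes the package is imported as swarm):

reqs := &swarm.ResourceRequirements{
	Reservations: &swarm.Resources{
		NanoCPUs:    500_000_000,       // 0.5 CPU, expressed in units of 1e-9 CPUs
		MemoryBytes: 256 * 1024 * 1024, // 256 MiB
		GenericResources: []swarm.GenericResource{
			{DiscreteResourceSpec: &swarm.DiscreteGenericResource{Kind: "GPU", Value: 1}},
		},
	},
	Limits: &swarm.Limit{Pids: 512},
}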
+type RootFS struct { + Type string + Layers []string `json:",omitempty"` + BaseLayer string `json:",omitempty"` +} + +// ImageInspect contains response of Engine API: +// GET "/images/{name:.*}/json" +type ImageInspect struct { + ID string `json:"Id"` + RepoTags []string + RepoDigests []string + Parent string + Comment string + Created string + Container string + ContainerConfig *container.Config + DockerVersion string + Author string + Config *container.Config + Architecture string + Variant string `json:",omitempty"` + Os string + OsVersion string `json:",omitempty"` + Size int64 + VirtualSize int64 + GraphDriver GraphDriverData + RootFS RootFS + Metadata ImageMetadata +} + +// ImageMetadata contains engine-local data about the image +type ImageMetadata struct { + LastTagTime time.Time `json:",omitempty"` +} + +// Container contains response of Engine API: +// GET "/containers/json" +type Container struct { + ID string `json:"Id"` + Names []string + Image string + ImageID string + Command string + Created int64 + Ports []Port + SizeRw int64 `json:",omitempty"` + SizeRootFs int64 `json:",omitempty"` + Labels map[string]string + State string + Status string + HostConfig struct { + NetworkMode string `json:",omitempty"` + } + NetworkSettings *SummaryNetworkSettings + Mounts []MountPoint +} + +// CopyConfig contains request body of Engine API: +// POST "/containers/"+containerID+"/copy" +type CopyConfig struct { + Resource string +} + +// ContainerPathStat is used to encode the header from +// GET "/containers/{name:.*}/archive" +// "Name" is the file or directory name. +type ContainerPathStat struct { + Name string `json:"name"` + Size int64 `json:"size"` + Mode os.FileMode `json:"mode"` + Mtime time.Time `json:"mtime"` + LinkTarget string `json:"linkTarget"` +} + +// ContainerStats contains response of Engine API: +// GET "/stats" +type ContainerStats struct { + Body io.ReadCloser `json:"body"` + OSType string `json:"ostype"` +} + +// Ping contains response of Engine API: +// GET "/_ping" +type Ping struct { + APIVersion string + OSType string + Experimental bool + BuilderVersion BuilderVersion +} + +// ComponentVersion describes the version information for a specific component. +type ComponentVersion struct { + Name string + Version string + Details map[string]string `json:",omitempty"` +} + +// Version contains response of Engine API: +// GET "/version" +type Version struct { + Platform struct{ Name string } `json:",omitempty"` + Components []ComponentVersion `json:",omitempty"` + + // The following fields are deprecated, they relate to the Engine component and are kept for backwards compatibility + + Version string + APIVersion string `json:"ApiVersion"` + MinAPIVersion string `json:"MinAPIVersion,omitempty"` + GitCommit string + GoVersion string + Os string + Arch string + KernelVersion string `json:",omitempty"` + Experimental bool `json:",omitempty"` + BuildTime string `json:",omitempty"` +} + +// Commit holds the Git-commit (SHA1) that a binary was built from, as reported +// in the version-string of external tools, such as containerd, or runC. +type Commit struct { + ID string // ID is the actual commit ID of external tool. + Expected string // Expected is the commit ID of external tool expected by dockerd as set at build time. 
+} + +// Info contains response of Engine API: +// GET "/info" +type Info struct { + ID string + Containers int + ContainersRunning int + ContainersPaused int + ContainersStopped int + Images int + Driver string + DriverStatus [][2]string + SystemStatus [][2]string `json:",omitempty"` // SystemStatus is only propagated by the Swarm standalone API + Plugins PluginsInfo + MemoryLimit bool + SwapLimit bool + KernelMemory bool // Deprecated: kernel 5.4 deprecated kmem.limit_in_bytes + KernelMemoryTCP bool + CPUCfsPeriod bool `json:"CpuCfsPeriod"` + CPUCfsQuota bool `json:"CpuCfsQuota"` + CPUShares bool + CPUSet bool + PidsLimit bool + IPv4Forwarding bool + BridgeNfIptables bool + BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` + Debug bool + NFd int + OomKillDisable bool + NGoroutines int + SystemTime string + LoggingDriver string + CgroupDriver string + CgroupVersion string `json:",omitempty"` + NEventsListener int + KernelVersion string + OperatingSystem string + OSVersion string + OSType string + Architecture string + IndexServerAddress string + RegistryConfig *registry.ServiceConfig + NCPU int + MemTotal int64 + GenericResources []swarm.GenericResource + DockerRootDir string + HTTPProxy string `json:"HttpProxy"` + HTTPSProxy string `json:"HttpsProxy"` + NoProxy string + Name string + Labels []string + ExperimentalBuild bool + ServerVersion string + ClusterStore string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + ClusterAdvertise string `json:",omitempty"` // Deprecated: host-discovery and overlay networks with external k/v stores are deprecated + Runtimes map[string]Runtime + DefaultRuntime string + Swarm swarm.Info + // LiveRestoreEnabled determines whether containers should be kept + // running when the daemon is shutdown or upon daemon start if + // running containers are detected + LiveRestoreEnabled bool + Isolation container.Isolation + InitBinary string + ContainerdCommit Commit + RuncCommit Commit + InitCommit Commit + SecurityOptions []string + ProductLicense string `json:",omitempty"` + DefaultAddressPools []NetworkAddressPool `json:",omitempty"` + Warnings []string +} + +// KeyValue holds a key/value pair +type KeyValue struct { + Key, Value string +} + +// NetworkAddressPool is a temp struct used by Info struct +type NetworkAddressPool struct { + Base string + Size int +} + +// SecurityOpt contains the name and options of a security option +type SecurityOpt struct { + Name string + Options []KeyValue +} + +// DecodeSecurityOptions decodes a security options string slice to a type safe +// SecurityOpt +func DecodeSecurityOptions(opts []string) ([]SecurityOpt, error) { + so := []SecurityOpt{} + for _, opt := range opts { + // support output from a < 1.13 docker daemon + if !strings.Contains(opt, "=") { + so = append(so, SecurityOpt{Name: opt}) + continue + } + secopt := SecurityOpt{} + split := strings.Split(opt, ",") + for _, s := range split { + kv := strings.SplitN(s, "=", 2) + if len(kv) != 2 { + return nil, fmt.Errorf("invalid security option %q", s) + } + if kv[0] == "" || kv[1] == "" { + return nil, errors.New("invalid empty security option") + } + if kv[0] == "name" { + secopt.Name = kv[1] + continue + } + secopt.Options = append(secopt.Options, KeyValue{Key: kv[0], Value: kv[1]}) + } + so = append(so, secopt) + } + return so, nil +} + +// PluginsInfo is a temp struct holding Plugins name +// registered with docker daemon. 
It is used by Info struct +type PluginsInfo struct { + // List of Volume plugins registered + Volume []string + // List of Network plugins registered + Network []string + // List of Authorization plugins registered + Authorization []string + // List of Log plugins registered + Log []string +} + +// ExecStartCheck is a temp struct used by execStart +// Config fields is part of ExecConfig in runconfig package +type ExecStartCheck struct { + // ExecStart will first check if it's detached + Detach bool + // Check if there's a tty + Tty bool +} + +// HealthcheckResult stores information about a single run of a healthcheck probe +type HealthcheckResult struct { + Start time.Time // Start is the time this check started + End time.Time // End is the time this check ended + ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe + Output string // Output from last check +} + +// Health states +const ( + NoHealthcheck = "none" // Indicates there is no healthcheck + Starting = "starting" // Starting indicates that the container is not yet ready + Healthy = "healthy" // Healthy indicates that the container is running correctly + Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem +) + +// Health stores information about the container's healthcheck results +type Health struct { + Status string // Status is one of Starting, Healthy or Unhealthy + FailingStreak int // FailingStreak is the number of consecutive failures + Log []*HealthcheckResult // Log contains the last few results (oldest first) +} + +// ContainerState stores container's running state +// it's part of ContainerJSONBase and will return by "inspect" command +type ContainerState struct { + Status string // String representation of the container state. Can be one of "created", "running", "paused", "restarting", "removing", "exited", or "dead" + Running bool + Paused bool + Restarting bool + OOMKilled bool + Dead bool + Pid int + ExitCode int + Error string + StartedAt string + FinishedAt string + Health *Health `json:",omitempty"` +} + +// ContainerNode stores information about the node that a container +// is running on. 
It's only used by the Docker Swarm standalone API +type ContainerNode struct { + ID string + IPAddress string `json:"IP"` + Addr string + Name string + Cpus int + Memory int64 + Labels map[string]string +} + +// ContainerJSONBase contains response of Engine API: +// GET "/containers/{name:.*}/json" +type ContainerJSONBase struct { + ID string `json:"Id"` + Created string + Path string + Args []string + State *ContainerState + Image string + ResolvConfPath string + HostnamePath string + HostsPath string + LogPath string + Node *ContainerNode `json:",omitempty"` // Node is only propagated by Docker Swarm standalone API + Name string + RestartCount int + Driver string + Platform string + MountLabel string + ProcessLabel string + AppArmorProfile string + ExecIDs []string + HostConfig *container.HostConfig + GraphDriver GraphDriverData + SizeRw *int64 `json:",omitempty"` + SizeRootFs *int64 `json:",omitempty"` +} + +// ContainerJSON is newly used struct along with MountPoint +type ContainerJSON struct { + *ContainerJSONBase + Mounts []MountPoint + Config *container.Config + NetworkSettings *NetworkSettings +} + +// NetworkSettings exposes the network settings in the api +type NetworkSettings struct { + NetworkSettingsBase + DefaultNetworkSettings + Networks map[string]*network.EndpointSettings +} + +// SummaryNetworkSettings provides a summary of container's networks +// in /containers/json +type SummaryNetworkSettings struct { + Networks map[string]*network.EndpointSettings +} + +// NetworkSettingsBase holds basic information about networks +type NetworkSettingsBase struct { + Bridge string // Bridge is the Bridge name the network uses(e.g. `docker0`) + SandboxID string // SandboxID uniquely represents a container's network stack + HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface + LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix + LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address + Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port + SandboxKey string // SandboxKey identifies the sandbox + SecondaryIPAddresses []network.Address + SecondaryIPv6Addresses []network.Address +} + +// DefaultNetworkSettings holds network information +// during the 2 release deprecation period. +// It will be removed in Docker 1.11. +type DefaultNetworkSettings struct { + EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox + Gateway string // Gateway holds the gateway address for the network + GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address + GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address + IPAddress string // IPAddress holds the IPv4 address for the network + IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address + IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 + MacAddress string // MacAddress holds the MAC address for the network +} + +// MountPoint represents a mount point configuration inside the container. +// This is used for reporting the mountpoints in use by a container. 
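The DecodeSecurityOptions helper defined earlier in this file turns the flat SecurityOptions strings from GET "/info" into typed values, tolerating the bare names emitted by pre-1.13 daemons. A usage sketch, using the import path as vendored here (the option strings are illustrative):

package main

import (
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Representative SecurityOptions values as returned by GET /info.
	raw := []string{
		"name=seccomp,profile=default",
		"name=apparmor",
		"selinux", // daemons older than 1.13 emit bare names without "="
	}

	opts, err := types.DecodeSecurityOptions(raw)
	if err != nil {
		panic(err)
	}
	for _, o := range opts {
		fmt.Printf("%s: %v\n", o.Name, o.Options)
	}
}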
+type MountPoint struct { + Type mount.Type `json:",omitempty"` + Name string `json:",omitempty"` + Source string + Destination string + Driver string `json:",omitempty"` + Mode string + RW bool + Propagation mount.Propagation +} + +// NetworkResource is the body of the "get network" http response message +type NetworkResource struct { + Name string // Name is the requested name of the network + ID string `json:"Id"` // ID uniquely identifies a network on a single machine + Created time.Time // Created is the time the network created + Scope string // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level) + Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) + EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 + IPAM network.IPAM // IPAM is the network's IP Address Management + Internal bool // Internal represents if the network is used internal only + Attachable bool // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode. + Ingress bool // Ingress indicates the network is providing the routing-mesh for the swarm cluster. + ConfigFrom network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. + ConfigOnly bool // ConfigOnly networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services. + Containers map[string]EndpointResource // Containers contains endpoints belonging to the network + Options map[string]string // Options holds the network specific options to use for when creating the network + Labels map[string]string // Labels holds metadata specific to the network being created + Peers []network.PeerInfo `json:",omitempty"` // List of peer nodes for an overlay network + Services map[string]network.ServiceInfo `json:",omitempty"` +} + +// EndpointResource contains network resources allocated and used for a container in a network +type EndpointResource struct { + Name string + EndpointID string + MacAddress string + IPv4Address string + IPv6Address string +} + +// NetworkCreate is the expected body of the "create network" http request message +type NetworkCreate struct { + // Check for networks with duplicate names. + // Network is primarily keyed based on a random ID and not on the name. + // Network name is strictly a user-friendly alias to the network + // which is uniquely identified using ID. + // And there is no guaranteed way to check for duplicates. + // Option CheckDuplicate is there to provide a best effort checking of any networks + // which has the same name but it is not guaranteed to catch all name collisions. + CheckDuplicate bool + Driver string + Scope string + EnableIPv6 bool + IPAM *network.IPAM + Internal bool + Attachable bool + Ingress bool + ConfigOnly bool + ConfigFrom *network.ConfigReference + Options map[string]string + Labels map[string]string +} + +// NetworkCreateRequest is the request message sent to the server for network create call. 
+type NetworkCreateRequest struct { + NetworkCreate + Name string +} + +// NetworkCreateResponse is the response message sent by the server for network create call +type NetworkCreateResponse struct { + ID string `json:"Id"` + Warning string +} + +// NetworkConnect represents the data to be used to connect a container to the network +type NetworkConnect struct { + Container string + EndpointConfig *network.EndpointSettings `json:",omitempty"` +} + +// NetworkDisconnect represents the data to be used to disconnect a container from the network +type NetworkDisconnect struct { + Container string + Force bool +} + +// NetworkInspectOptions holds parameters to inspect network +type NetworkInspectOptions struct { + Scope string + Verbose bool +} + +// Checkpoint represents the details of a checkpoint +type Checkpoint struct { + Name string // Name is the name of the checkpoint +} + +// Runtime describes an OCI runtime +type Runtime struct { + Path string `json:"path"` + Args []string `json:"runtimeArgs,omitempty"` + + // This is exposed here only for internal use + // It is not currently supported to specify custom shim configs + Shim *ShimConfig `json:"-"` +} + +// ShimConfig is used by runtime to configure containerd shims +type ShimConfig struct { + Binary string + Opts interface{} +} + +// DiskUsage contains response of Engine API: +// GET "/system/df" +type DiskUsage struct { + LayersSize int64 + Images []*ImageSummary + Containers []*Container + Volumes []*Volume + BuildCache []*BuildCache + BuilderSize int64 // deprecated +} + +// ContainersPruneReport contains the response for Engine API: +// POST "/containers/prune" +type ContainersPruneReport struct { + ContainersDeleted []string + SpaceReclaimed uint64 +} + +// VolumesPruneReport contains the response for Engine API: +// POST "/volumes/prune" +type VolumesPruneReport struct { + VolumesDeleted []string + SpaceReclaimed uint64 +} + +// ImagesPruneReport contains the response for Engine API: +// POST "/images/prune" +type ImagesPruneReport struct { + ImagesDeleted []ImageDeleteResponseItem + SpaceReclaimed uint64 +} + +// BuildCachePruneReport contains the response for Engine API: +// POST "/build/prune" +type BuildCachePruneReport struct { + CachesDeleted []string + SpaceReclaimed uint64 +} + +// NetworksPruneReport contains the response for Engine API: +// POST "/networks/prune" +type NetworksPruneReport struct { + NetworksDeleted []string +} + +// SecretCreateResponse contains the information returned to a client +// on the creation of a new secret. +type SecretCreateResponse struct { + // ID is the id of the created secret. + ID string +} + +// SecretListOptions holds parameters to list secrets +type SecretListOptions struct { + Filters filters.Args +} + +// ConfigCreateResponse contains the information returned to a client +// on the creation of a new config. +type ConfigCreateResponse struct { + // ID is the id of the created config. + ID string +} + +// ConfigListOptions holds parameters to list configs +type ConfigListOptions struct { + Filters filters.Args +} + +// PushResult contains the tag, manifest digest, and manifest size from the +// push. It's used to signal this information to the trust code in the client +// so it can sign the manifest if necessary. 
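Putting the network-create types together: the body for POST "/networks/create" is NetworkCreate plus a Name, and CheckDuplicate is best-effort only, since networks are keyed by random ID rather than name. A hedged sketch of building that body (field values are made up); the embedded struct has no JSON tag, so its fields flatten into the top-level object:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/docker/docker/api/types"
)

func main() {
	// Request body for POST /networks/create. CheckDuplicate is best-effort:
	// name collisions can still occur because the ID, not the name, is the key.
	req := types.NetworkCreateRequest{
		Name: "demo-overlay",
		NetworkCreate: types.NetworkCreate{
			Driver:         "overlay",
			CheckDuplicate: true,
			Attachable:     true,
			Labels:         map[string]string{"env": "test"},
		},
	}

	body, err := json.Marshal(req)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(body))
}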
+type PushResult struct {
+	Tag    string
+	Digest string
+	Size   int
+}
+
+// BuildResult contains the image id of a successful build
+type BuildResult struct {
+	ID string
+}
+
+// BuildCache contains information about a build cache record
+type BuildCache struct {
+	ID          string
+	Parent      string
+	Type        string
+	Description string
+	InUse       bool
+	Shared      bool
+	Size        int64
+	CreatedAt   time.Time
+	LastUsedAt  *time.Time
+	UsageCount  int
+}
+
+// BuildCachePruneOptions holds parameters to prune the build cache
+type BuildCachePruneOptions struct {
+	All         bool
+	KeepStorage int64
+	Filters     filters.Args
+}
diff --git a/vendor/github.com/docker/docker/api/types/versions/README.md b/vendor/github.com/docker/docker/api/types/versions/README.md
new file mode 100644
index 0000000000..1ef911edb0
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/README.md
@@ -0,0 +1,14 @@
+# Legacy API type versions
+
+This package includes types for legacy API versions. The stable versions of the API types live in `api/types/*.go`.
+
+Consider moving a type here when you need to keep backwards compatibility in the API. These legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal to `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`.
+
+## Package name conventions
+
+The package name convention is to use `v` as a prefix for the version number and `p` (patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention:
+
+1. We cannot use `.` because it's interpreted by the language; think of `v1.20.CallFunction`.
+2. We cannot use `_` because golint complains about it. The code is actually valid, but it arguably looks odd: `v1_20.CallFunction`.
+
+For instance, if you want to modify a type that was available in version `1.21` of the API but will have different fields in version `1.22`, create a new package under `api/types/versions/v1p21`.
diff --git a/vendor/github.com/docker/docker/api/types/versions/compare.go b/vendor/github.com/docker/docker/api/types/versions/compare.go
new file mode 100644
index 0000000000..8ccb0aa92e
--- /dev/null
+++ b/vendor/github.com/docker/docker/api/types/versions/compare.go
@@ -0,0 +1,62 @@
+package versions // import "github.com/docker/docker/api/types/versions"
+
+import (
+	"strconv"
+	"strings"
+)
+
+// compare compares two version strings and
+// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise.
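The exported helpers defined just below wrap this segment-wise numeric comparison; note that strconv.Atoi errors are ignored, so non-numeric or missing segments compare as 0. A quick usage sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/api/types/versions"
)

func main() {
	fmt.Println(versions.LessThan("1.19", "1.21"))             // true
	fmt.Println(versions.GreaterThanOrEqualTo("1.21", "1.21")) // true

	// Missing segments compare as zero, so "1.21" equals "1.21.0".
	fmt.Println(versions.Equal("1.21", "1.21.0")) // true
}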
+func compare(v1, v2 string) int { + var ( + currTab = strings.Split(v1, ".") + otherTab = strings.Split(v2, ".") + ) + + max := len(currTab) + if len(otherTab) > max { + max = len(otherTab) + } + for i := 0; i < max; i++ { + var currInt, otherInt int + + if len(currTab) > i { + currInt, _ = strconv.Atoi(currTab[i]) + } + if len(otherTab) > i { + otherInt, _ = strconv.Atoi(otherTab[i]) + } + if currInt > otherInt { + return 1 + } + if otherInt > currInt { + return -1 + } + } + return 0 +} + +// LessThan checks if a version is less than another +func LessThan(v, other string) bool { + return compare(v, other) == -1 +} + +// LessThanOrEqualTo checks if a version is less than or equal to another +func LessThanOrEqualTo(v, other string) bool { + return compare(v, other) <= 0 +} + +// GreaterThan checks if a version is greater than another +func GreaterThan(v, other string) bool { + return compare(v, other) == 1 +} + +// GreaterThanOrEqualTo checks if a version is greater than or equal to another +func GreaterThanOrEqualTo(v, other string) bool { + return compare(v, other) >= 0 +} + +// Equal checks if a version is equal to another +func Equal(v, other string) bool { + return compare(v, other) == 0 +} diff --git a/vendor/github.com/docker/docker/api/types/volume.go b/vendor/github.com/docker/docker/api/types/volume.go new file mode 100644 index 0000000000..c69b08448d --- /dev/null +++ b/vendor/github.com/docker/docker/api/types/volume.go @@ -0,0 +1,72 @@ +package types + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +// Volume volume +// swagger:model Volume +type Volume struct { + + // Date/Time the volume was created. + CreatedAt string `json:"CreatedAt,omitempty"` + + // Name of the volume driver used by the volume. + // Required: true + Driver string `json:"Driver"` + + // User-defined key/value metadata. + // Required: true + Labels map[string]string `json:"Labels"` + + // Mount path of the volume on the host. + // Required: true + Mountpoint string `json:"Mountpoint"` + + // Name of the volume. + // Required: true + Name string `json:"Name"` + + // The driver specific options used when creating the volume. + // + // Required: true + Options map[string]string `json:"Options"` + + // The level at which the volume exists. Either `global` for cluster-wide, + // or `local` for machine level. + // + // Required: true + Scope string `json:"Scope"` + + // Low-level details about the volume, provided by the volume driver. + // Details are returned as a map with key/value pairs: + // `{"key":"value","key2":"value2"}`. + // + // The `Status` field is optional, and is omitted if the volume driver + // does not support this feature. + // + Status map[string]interface{} `json:"Status,omitempty"` + + // usage data + UsageData *VolumeUsageData `json:"UsageData,omitempty"` +} + +// VolumeUsageData Usage details about the volume. This information is used by the +// `GET /system/df` endpoint, and omitted in other endpoints. +// +// swagger:model VolumeUsageData +type VolumeUsageData struct { + + // The number of containers referencing this volume. This field + // is set to `-1` if the reference-count is not available. + // + // Required: true + RefCount int64 `json:"RefCount"` + + // Amount of disk space used by the volume (in bytes). This information + // is only available for volumes created with the `"local"` volume + // driver. 
For volumes created with other volume drivers, this field + // is set to `-1` ("not available") + // + // Required: true + Size int64 `json:"Size"` +} diff --git a/vendor/github.com/docker/docker/errdefs/defs.go b/vendor/github.com/docker/docker/errdefs/defs.go new file mode 100644 index 0000000000..61e7456b4e --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/defs.go @@ -0,0 +1,69 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +// ErrNotFound signals that the requested object doesn't exist +type ErrNotFound interface { + NotFound() +} + +// ErrInvalidParameter signals that the user input is invalid +type ErrInvalidParameter interface { + InvalidParameter() +} + +// ErrConflict signals that some internal state conflicts with the requested action and can't be performed. +// A change in state should be able to clear this error. +type ErrConflict interface { + Conflict() +} + +// ErrUnauthorized is used to signify that the user is not authorized to perform a specific action +type ErrUnauthorized interface { + Unauthorized() +} + +// ErrUnavailable signals that the requested action/subsystem is not available. +type ErrUnavailable interface { + Unavailable() +} + +// ErrForbidden signals that the requested action cannot be performed under any circumstances. +// When a ErrForbidden is returned, the caller should never retry the action. +type ErrForbidden interface { + Forbidden() +} + +// ErrSystem signals that some internal error occurred. +// An example of this would be a failed mount request. +type ErrSystem interface { + System() +} + +// ErrNotModified signals that an action can't be performed because it's already in the desired state +type ErrNotModified interface { + NotModified() +} + +// ErrNotImplemented signals that the requested action/feature is not implemented on the system as configured. +type ErrNotImplemented interface { + NotImplemented() +} + +// ErrUnknown signals that the kind of error that occurred is not known. +type ErrUnknown interface { + Unknown() +} + +// ErrCancelled signals that the action was cancelled. +type ErrCancelled interface { + Cancelled() +} + +// ErrDeadline signals that the deadline was reached before the action completed. +type ErrDeadline interface { + DeadlineExceeded() +} + +// ErrDataLoss indicates that data was lost or there is data corruption. +type ErrDataLoss interface { + DataLoss() +} diff --git a/vendor/github.com/docker/docker/errdefs/doc.go b/vendor/github.com/docker/docker/errdefs/doc.go new file mode 100644 index 0000000000..c211f174fc --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/doc.go @@ -0,0 +1,8 @@ +// Package errdefs defines a set of error interfaces that packages should use for communicating classes of errors. +// Errors that cross the package boundary should implement one (and only one) of these interfaces. +// +// Packages should not reference these interfaces directly, only implement them. +// To check if a particular error implements one of these interfaces, there are helper +// functions provided (e.g. `Is`) which can be used rather than asserting the interfaces directly. +// If you must assert on these interfaces, be sure to check the causal chain (`err.Cause()`). 
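Since each class is a single marker method, any error type can opt in without importing errdefs, and detection is a plain interface assertion (the Is* helpers later in this diff additionally walk the causal chain). A minimal sketch with a hypothetical notFoundErr type:

package main

import "fmt"

// notFoundErr opts into the ErrNotFound class by carrying the marker method.
type notFoundErr struct{ msg string }

func (e notFoundErr) Error() string { return e.msg }
func (e notFoundErr) NotFound()     {}

func main() {
	var err error = notFoundErr{msg: "no such container: web"}

	// Callers detect the class with an interface assertion, mirroring
	// what errdefs.IsNotFound does (plus walking the causal chain).
	if _, ok := err.(interface{ NotFound() }); ok {
		fmt.Println("treat as 404:", err)
	}
}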
+package errdefs // import "github.com/docker/docker/errdefs" diff --git a/vendor/github.com/docker/docker/errdefs/helpers.go b/vendor/github.com/docker/docker/errdefs/helpers.go new file mode 100644 index 0000000000..fe06fb6f70 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/helpers.go @@ -0,0 +1,279 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import "context" + +type errNotFound struct{ error } + +func (errNotFound) NotFound() {} + +func (e errNotFound) Cause() error { + return e.error +} + +func (e errNotFound) Unwrap() error { + return e.error +} + +// NotFound is a helper to create an error of the class with the same name from any error type +func NotFound(err error) error { + if err == nil || IsNotFound(err) { + return err + } + return errNotFound{err} +} + +type errInvalidParameter struct{ error } + +func (errInvalidParameter) InvalidParameter() {} + +func (e errInvalidParameter) Cause() error { + return e.error +} + +func (e errInvalidParameter) Unwrap() error { + return e.error +} + +// InvalidParameter is a helper to create an error of the class with the same name from any error type +func InvalidParameter(err error) error { + if err == nil || IsInvalidParameter(err) { + return err + } + return errInvalidParameter{err} +} + +type errConflict struct{ error } + +func (errConflict) Conflict() {} + +func (e errConflict) Cause() error { + return e.error +} + +func (e errConflict) Unwrap() error { + return e.error +} + +// Conflict is a helper to create an error of the class with the same name from any error type +func Conflict(err error) error { + if err == nil || IsConflict(err) { + return err + } + return errConflict{err} +} + +type errUnauthorized struct{ error } + +func (errUnauthorized) Unauthorized() {} + +func (e errUnauthorized) Cause() error { + return e.error +} + +func (e errUnauthorized) Unwrap() error { + return e.error +} + +// Unauthorized is a helper to create an error of the class with the same name from any error type +func Unauthorized(err error) error { + if err == nil || IsUnauthorized(err) { + return err + } + return errUnauthorized{err} +} + +type errUnavailable struct{ error } + +func (errUnavailable) Unavailable() {} + +func (e errUnavailable) Cause() error { + return e.error +} + +func (e errUnavailable) Unwrap() error { + return e.error +} + +// Unavailable is a helper to create an error of the class with the same name from any error type +func Unavailable(err error) error { + if err == nil || IsUnavailable(err) { + return err + } + return errUnavailable{err} +} + +type errForbidden struct{ error } + +func (errForbidden) Forbidden() {} + +func (e errForbidden) Cause() error { + return e.error +} + +func (e errForbidden) Unwrap() error { + return e.error +} + +// Forbidden is a helper to create an error of the class with the same name from any error type +func Forbidden(err error) error { + if err == nil || IsForbidden(err) { + return err + } + return errForbidden{err} +} + +type errSystem struct{ error } + +func (errSystem) System() {} + +func (e errSystem) Cause() error { + return e.error +} + +func (e errSystem) Unwrap() error { + return e.error +} + +// System is a helper to create an error of the class with the same name from any error type +func System(err error) error { + if err == nil || IsSystem(err) { + return err + } + return errSystem{err} +} + +type errNotModified struct{ error } + +func (errNotModified) NotModified() {} + +func (e errNotModified) Cause() error { + return 
e.error +} + +func (e errNotModified) Unwrap() error { + return e.error +} + +// NotModified is a helper to create an error of the class with the same name from any error type +func NotModified(err error) error { + if err == nil || IsNotModified(err) { + return err + } + return errNotModified{err} +} + +type errNotImplemented struct{ error } + +func (errNotImplemented) NotImplemented() {} + +func (e errNotImplemented) Cause() error { + return e.error +} + +func (e errNotImplemented) Unwrap() error { + return e.error +} + +// NotImplemented is a helper to create an error of the class with the same name from any error type +func NotImplemented(err error) error { + if err == nil || IsNotImplemented(err) { + return err + } + return errNotImplemented{err} +} + +type errUnknown struct{ error } + +func (errUnknown) Unknown() {} + +func (e errUnknown) Cause() error { + return e.error +} + +func (e errUnknown) Unwrap() error { + return e.error +} + +// Unknown is a helper to create an error of the class with the same name from any error type +func Unknown(err error) error { + if err == nil || IsUnknown(err) { + return err + } + return errUnknown{err} +} + +type errCancelled struct{ error } + +func (errCancelled) Cancelled() {} + +func (e errCancelled) Cause() error { + return e.error +} + +func (e errCancelled) Unwrap() error { + return e.error +} + +// Cancelled is a helper to create an error of the class with the same name from any error type +func Cancelled(err error) error { + if err == nil || IsCancelled(err) { + return err + } + return errCancelled{err} +} + +type errDeadline struct{ error } + +func (errDeadline) DeadlineExceeded() {} + +func (e errDeadline) Cause() error { + return e.error +} + +func (e errDeadline) Unwrap() error { + return e.error +} + +// Deadline is a helper to create an error of the class with the same name from any error type +func Deadline(err error) error { + if err == nil || IsDeadline(err) { + return err + } + return errDeadline{err} +} + +type errDataLoss struct{ error } + +func (errDataLoss) DataLoss() {} + +func (e errDataLoss) Cause() error { + return e.error +} + +func (e errDataLoss) Unwrap() error { + return e.error +} + +// DataLoss is a helper to create an error of the class with the same name from any error type +func DataLoss(err error) error { + if err == nil || IsDataLoss(err) { + return err + } + return errDataLoss{err} +} + +// FromContext returns the error class from the passed in context +func FromContext(ctx context.Context) error { + e := ctx.Err() + if e == nil { + return nil + } + + if e == context.Canceled { + return Cancelled(e) + } + if e == context.DeadlineExceeded { + return Deadline(e) + } + return Unknown(e) +} diff --git a/vendor/github.com/docker/docker/errdefs/http_helpers.go b/vendor/github.com/docker/docker/errdefs/http_helpers.go new file mode 100644 index 0000000000..07552f1cc1 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/http_helpers.go @@ -0,0 +1,191 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +import ( + "fmt" + "net/http" + + containerderrors "github.com/containerd/containerd/errdefs" + "github.com/docker/distribution/registry/api/errcode" + "github.com/sirupsen/logrus" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// GetHTTPErrorStatusCode retrieves status code from error message. 
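Because every wrapper above implements both Cause() and Unwrap(), classification composes with the standard errors package: the original sentinel stays reachable after wrapping. A usage sketch, assuming the vendored errdefs import path:

package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/docker/docker/errdefs"
)

func main() {
	// Wrapping classifies the error without hiding the original cause.
	cause := errors.New("no such image: alpine:42")
	err := errdefs.NotFound(cause)

	fmt.Println(errdefs.IsNotFound(err)) // true
	fmt.Println(errors.Is(err, cause))   // true, thanks to Unwrap()

	// FromContext maps context errors onto the same classes.
	ctx, cancel := context.WithCancel(context.Background())
	cancel()
	fmt.Println(errdefs.IsCancelled(errdefs.FromContext(ctx))) // true
}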
+func GetHTTPErrorStatusCode(err error) int { + if err == nil { + logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") + return http.StatusInternalServerError + } + + var statusCode int + + // Stop right there + // Are you sure you should be adding a new error class here? Do one of the existing ones work? + + // Note that the below functions are already checking the error causal chain for matches. + switch { + case IsNotFound(err): + statusCode = http.StatusNotFound + case IsInvalidParameter(err): + statusCode = http.StatusBadRequest + case IsConflict(err): + statusCode = http.StatusConflict + case IsUnauthorized(err): + statusCode = http.StatusUnauthorized + case IsUnavailable(err): + statusCode = http.StatusServiceUnavailable + case IsForbidden(err): + statusCode = http.StatusForbidden + case IsNotModified(err): + statusCode = http.StatusNotModified + case IsNotImplemented(err): + statusCode = http.StatusNotImplemented + case IsSystem(err) || IsUnknown(err) || IsDataLoss(err) || IsDeadline(err) || IsCancelled(err): + statusCode = http.StatusInternalServerError + default: + statusCode = statusCodeFromGRPCError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + statusCode = statusCodeFromContainerdError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + statusCode = statusCodeFromDistributionError(err) + if statusCode != http.StatusInternalServerError { + return statusCode + } + if e, ok := err.(causer); ok { + return GetHTTPErrorStatusCode(e.Cause()) + } + + logrus.WithFields(logrus.Fields{ + "module": "api", + "error_type": fmt.Sprintf("%T", err), + }).Debugf("FIXME: Got an API for which error does not match any expected type!!!: %+v", err) + } + + if statusCode == 0 { + statusCode = http.StatusInternalServerError + } + + return statusCode +} + +// FromStatusCode creates an errdef error, based on the provided HTTP status-code +func FromStatusCode(err error, statusCode int) error { + if err == nil { + return err + } + switch statusCode { + case http.StatusNotFound: + err = NotFound(err) + case http.StatusBadRequest: + err = InvalidParameter(err) + case http.StatusConflict: + err = Conflict(err) + case http.StatusUnauthorized: + err = Unauthorized(err) + case http.StatusServiceUnavailable: + err = Unavailable(err) + case http.StatusForbidden: + err = Forbidden(err) + case http.StatusNotModified: + err = NotModified(err) + case http.StatusNotImplemented: + err = NotImplemented(err) + case http.StatusInternalServerError: + if !IsSystem(err) && !IsUnknown(err) && !IsDataLoss(err) && !IsDeadline(err) && !IsCancelled(err) { + err = System(err) + } + default: + logrus.WithFields(logrus.Fields{ + "module": "api", + "status_code": fmt.Sprintf("%d", statusCode), + }).Debugf("FIXME: Got an status-code for which error does not match any expected type!!!: %d", statusCode) + + switch { + case statusCode >= 200 && statusCode < 400: + // it's a client error + case statusCode >= 400 && statusCode < 500: + err = InvalidParameter(err) + case statusCode >= 500 && statusCode < 600: + err = System(err) + default: + err = Unknown(err) + } + } + return err +} + +// statusCodeFromGRPCError returns status code according to gRPC error +func statusCodeFromGRPCError(err error) int { + switch status.Code(err) { + case codes.InvalidArgument: // code 3 + return http.StatusBadRequest + case codes.NotFound: // code 5 + return http.StatusNotFound + case codes.AlreadyExists: // code 6 + return http.StatusConflict + case 
codes.PermissionDenied: // code 7 + return http.StatusForbidden + case codes.FailedPrecondition: // code 9 + return http.StatusBadRequest + case codes.Unauthenticated: // code 16 + return http.StatusUnauthorized + case codes.OutOfRange: // code 11 + return http.StatusBadRequest + case codes.Unimplemented: // code 12 + return http.StatusNotImplemented + case codes.Unavailable: // code 14 + return http.StatusServiceUnavailable + default: + // codes.Canceled(1) + // codes.Unknown(2) + // codes.DeadlineExceeded(4) + // codes.ResourceExhausted(8) + // codes.Aborted(10) + // codes.Internal(13) + // codes.DataLoss(15) + return http.StatusInternalServerError + } +} + +// statusCodeFromDistributionError returns status code according to registry errcode +// code is loosely based on errcode.ServeJSON() in docker/distribution +func statusCodeFromDistributionError(err error) int { + switch errs := err.(type) { + case errcode.Errors: + if len(errs) < 1 { + return http.StatusInternalServerError + } + if _, ok := errs[0].(errcode.ErrorCoder); ok { + return statusCodeFromDistributionError(errs[0]) + } + case errcode.ErrorCoder: + return errs.ErrorCode().Descriptor().HTTPStatusCode + } + return http.StatusInternalServerError +} + +// statusCodeFromContainerdError returns status code for containerd errors when +// consumed directly (not through gRPC) +func statusCodeFromContainerdError(err error) int { + switch { + case containerderrors.IsInvalidArgument(err): + return http.StatusBadRequest + case containerderrors.IsNotFound(err): + return http.StatusNotFound + case containerderrors.IsAlreadyExists(err): + return http.StatusConflict + case containerderrors.IsFailedPrecondition(err): + return http.StatusPreconditionFailed + case containerderrors.IsUnavailable(err): + return http.StatusServiceUnavailable + case containerderrors.IsNotImplemented(err): + return http.StatusNotImplemented + default: + return http.StatusInternalServerError + } +} diff --git a/vendor/github.com/docker/docker/errdefs/is.go b/vendor/github.com/docker/docker/errdefs/is.go new file mode 100644 index 0000000000..3abf07d0c3 --- /dev/null +++ b/vendor/github.com/docker/docker/errdefs/is.go @@ -0,0 +1,107 @@ +package errdefs // import "github.com/docker/docker/errdefs" + +type causer interface { + Cause() error +} + +func getImplementer(err error) error { + switch e := err.(type) { + case + ErrNotFound, + ErrInvalidParameter, + ErrConflict, + ErrUnauthorized, + ErrUnavailable, + ErrForbidden, + ErrSystem, + ErrNotModified, + ErrNotImplemented, + ErrCancelled, + ErrDeadline, + ErrDataLoss, + ErrUnknown: + return err + case causer: + return getImplementer(e.Cause()) + default: + return err + } +} + +// IsNotFound returns if the passed in error is an ErrNotFound +func IsNotFound(err error) bool { + _, ok := getImplementer(err).(ErrNotFound) + return ok +} + +// IsInvalidParameter returns if the passed in error is an ErrInvalidParameter +func IsInvalidParameter(err error) bool { + _, ok := getImplementer(err).(ErrInvalidParameter) + return ok +} + +// IsConflict returns if the passed in error is an ErrConflict +func IsConflict(err error) bool { + _, ok := getImplementer(err).(ErrConflict) + return ok +} + +// IsUnauthorized returns if the passed in error is an ErrUnauthorized +func IsUnauthorized(err error) bool { + _, ok := getImplementer(err).(ErrUnauthorized) + return ok +} + +// IsUnavailable returns if the passed in error is an ErrUnavailable +func IsUnavailable(err error) bool { + _, ok := 
getImplementer(err).(ErrUnavailable) + return ok +} + +// IsForbidden returns if the passed in error is an ErrForbidden +func IsForbidden(err error) bool { + _, ok := getImplementer(err).(ErrForbidden) + return ok +} + +// IsSystem returns if the passed in error is an ErrSystem +func IsSystem(err error) bool { + _, ok := getImplementer(err).(ErrSystem) + return ok +} + +// IsNotModified returns if the passed in error is a NotModified error +func IsNotModified(err error) bool { + _, ok := getImplementer(err).(ErrNotModified) + return ok +} + +// IsNotImplemented returns if the passed in error is an ErrNotImplemented +func IsNotImplemented(err error) bool { + _, ok := getImplementer(err).(ErrNotImplemented) + return ok +} + +// IsUnknown returns if the passed in error is an ErrUnknown +func IsUnknown(err error) bool { + _, ok := getImplementer(err).(ErrUnknown) + return ok +} + +// IsCancelled returns if the passed in error is an ErrCancelled +func IsCancelled(err error) bool { + _, ok := getImplementer(err).(ErrCancelled) + return ok +} + +// IsDeadline returns if the passed in error is an ErrDeadline +func IsDeadline(err error) bool { + _, ok := getImplementer(err).(ErrDeadline) + return ok +} + +// IsDataLoss returns if the passed in error is an ErrDataLoss +func IsDataLoss(err error) bool { + _, ok := getImplementer(err).(ErrDataLoss) + return ok +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go new file mode 100644 index 0000000000..5e6310fdcd --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_linux.go @@ -0,0 +1,93 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" + "os" + "path/filepath" + "strings" +) + +// GetRuntimeDir returns XDG_RUNTIME_DIR. +// XDG_RUNTIME_DIR is typically configured via pam_systemd. +// GetRuntimeDir returns non-nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetRuntimeDir() (string, error) { + if xdgRuntimeDir := os.Getenv("XDG_RUNTIME_DIR"); xdgRuntimeDir != "" { + return xdgRuntimeDir, nil + } + return "", errors.New("could not get XDG_RUNTIME_DIR") +} + +// StickRuntimeDirContents sets the sticky bit on files that are under +// XDG_RUNTIME_DIR, so that the files won't be periodically removed by the system. +// +// StickyRuntimeDir returns slice of sticked files. +// StickyRuntimeDir returns nil error if XDG_RUNTIME_DIR is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func StickRuntimeDirContents(files []string) ([]string, error) { + runtimeDir, err := GetRuntimeDir() + if err != nil { + // ignore error if runtimeDir is empty + return nil, nil + } + runtimeDir, err = filepath.Abs(runtimeDir) + if err != nil { + return nil, err + } + var sticked []string + for _, f := range files { + f, err = filepath.Abs(f) + if err != nil { + return sticked, err + } + if strings.HasPrefix(f, runtimeDir+"/") { + if err = stick(f); err != nil { + return sticked, err + } + sticked = append(sticked, f) + } + } + return sticked, nil +} + +func stick(f string) error { + st, err := os.Stat(f) + if err != nil { + return err + } + m := st.Mode() + m |= os.ModeSticky + return os.Chmod(f, m) +} + +// GetDataHome returns XDG_DATA_HOME. +// GetDataHome returns $HOME/.local/share and nil error if XDG_DATA_HOME is not set. 
+// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetDataHome() (string, error) { + if xdgDataHome := os.Getenv("XDG_DATA_HOME"); xdgDataHome != "" { + return xdgDataHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_DATA_HOME or HOME") + } + return filepath.Join(home, ".local", "share"), nil +} + +// GetConfigHome returns XDG_CONFIG_HOME. +// GetConfigHome returns $HOME/.config and nil error if XDG_CONFIG_HOME is not set. +// +// See also https://standards.freedesktop.org/basedir-spec/latest/ar01s03.html +func GetConfigHome() (string, error) { + if xdgConfigHome := os.Getenv("XDG_CONFIG_HOME"); xdgConfigHome != "" { + return xdgConfigHome, nil + } + home := os.Getenv("HOME") + if home == "" { + return "", errors.New("could not get either XDG_CONFIG_HOME or HOME") + } + return filepath.Join(home, ".config"), nil +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go new file mode 100644 index 0000000000..67ab9e9b31 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_others.go @@ -0,0 +1,27 @@ +// +build !linux + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "errors" +) + +// GetRuntimeDir is unsupported on non-linux system. +func GetRuntimeDir() (string, error) { + return "", errors.New("homedir.GetRuntimeDir() is not supported on this system") +} + +// StickRuntimeDirContents is unsupported on non-linux system. +func StickRuntimeDirContents(files []string) ([]string, error) { + return nil, errors.New("homedir.StickRuntimeDirContents() is not supported on this system") +} + +// GetDataHome is unsupported on non-linux system. +func GetDataHome() (string, error) { + return "", errors.New("homedir.GetDataHome() is not supported on this system") +} + +// GetConfigHome is unsupported on non-linux system. +func GetConfigHome() (string, error) { + return "", errors.New("homedir.GetConfigHome() is not supported on this system") +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go new file mode 100644 index 0000000000..441bd727b6 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_unix.go @@ -0,0 +1,38 @@ +// +build !windows + +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" + "os/user" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "HOME" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +// +// If linking statically with cgo enabled against glibc, ensure the +// osusergo build tag is used. +// +// If needing to do nss lookups, do not disable cgo or set osusergo. +func Get() string { + home := os.Getenv(Key()) + if home == "" { + if u, err := user.Current(); err == nil { + return u.HomeDir + } + } + return home +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. 
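The pattern across these homedir helpers is consistent: consult the XDG_* variable first, fall back to a $HOME-derived default, and return an error only when neither is available (or, for the XDG variants, when built for a non-Linux target, where the stubs always error). A usage sketch:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/homedir"
)

func main() {
	// On Linux these consult XDG_* first, then derive a default from $HOME.
	if dir, err := homedir.GetConfigHome(); err == nil {
		fmt.Println("config home:", dir)
	}
	if _, err := homedir.GetRuntimeDir(); err != nil {
		fmt.Println("no XDG_RUNTIME_DIR:", err)
	}

	fmt.Println("home:", homedir.Get()) // $HOME, or user DB fallback
}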
+func GetShortcutString() string { + return "~" +} diff --git a/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go new file mode 100644 index 0000000000..2f81813b28 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/homedir/homedir_windows.go @@ -0,0 +1,24 @@ +package homedir // import "github.com/docker/docker/pkg/homedir" + +import ( + "os" +) + +// Key returns the env var name for the user's home dir based on +// the platform being run on +func Key() string { + return "USERPROFILE" +} + +// Get returns the home directory of the current user with the help of +// environment variables depending on the target operating system. +// Returned path should be used with "path/filepath" to form new paths. +func Get() string { + return os.Getenv(Key()) +} + +// GetShortcutString returns the string that is shortcut to user's home directory +// in the native shell of the platform running on. +func GetShortcutString() string { + return "%USERPROFILE%" // be careful while using in format functions +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/buffer.go b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go new file mode 100644 index 0000000000..466f79294b --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/buffer.go @@ -0,0 +1,51 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" +) + +var errBufferFull = errors.New("buffer is full") + +type fixedBuffer struct { + buf []byte + pos int + lastRead int +} + +func (b *fixedBuffer) Write(p []byte) (int, error) { + n := copy(b.buf[b.pos:cap(b.buf)], p) + b.pos += n + + if n < len(p) { + if b.pos == cap(b.buf) { + return n, errBufferFull + } + return n, io.ErrShortWrite + } + return n, nil +} + +func (b *fixedBuffer) Read(p []byte) (int, error) { + n := copy(p, b.buf[b.lastRead:b.pos]) + b.lastRead += n + return n, nil +} + +func (b *fixedBuffer) Len() int { + return b.pos - b.lastRead +} + +func (b *fixedBuffer) Cap() int { + return cap(b.buf) +} + +func (b *fixedBuffer) Reset() { + b.pos = 0 + b.lastRead = 0 + b.buf = b.buf[:0] +} + +func (b *fixedBuffer) String() string { + return string(b.buf[b.lastRead:b.pos]) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go new file mode 100644 index 0000000000..87514b643d --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -0,0 +1,187 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. +const maxCap = 1e6 + +// minCap is the lowest capacity to use in byte slices that buffer data +const minCap = 64 + +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +var ( + // ErrClosed is returned when Write is called on a closed BytesPipe. + ErrClosed = errors.New("write to closed BytesPipe") + + bufPools = make(map[int]*sync.Pool) + bufPoolsLock sync.Mutex +) + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. 
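fixedBuffer above is unexported, so the following sketch carries a trimmed local copy of its Write method to show the contract BytesPipe relies on: a write that hits capacity returns errBufferFull along with the partial count, and the buffer never reallocates.

package main

import (
	"errors"
	"fmt"
	"io"
)

// Trimmed local copy of the unexported fixedBuffer, for illustration.
var errBufferFull = errors.New("buffer is full")

type fixedBuffer struct {
	buf      []byte
	pos      int
	lastRead int
}

func (b *fixedBuffer) Write(p []byte) (int, error) {
	// Copy into the spare capacity; never grow the backing array.
	n := copy(b.buf[b.pos:cap(b.buf)], p)
	b.pos += n
	if n < len(p) {
		if b.pos == cap(b.buf) {
			return n, errBufferFull
		}
		return n, io.ErrShortWrite
	}
	return n, nil
}

func main() {
	// Capacity is fixed at construction time.
	b := &fixedBuffer{buf: make([]byte, 0, 4)}

	n, err := b.Write([]byte("hello"))
	fmt.Println(n, err) // 4 buffer is full
}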
+type BytesPipe struct {
+	mu       sync.Mutex
+	wait     *sync.Cond
+	buf      []*fixedBuffer
+	bufLen   int
+	closeErr error // error to return from next Read. set to nil if not closed.
+}
+
+// NewBytesPipe creates a new, empty BytesPipe.
+// The pipe starts with a single fixed buffer of capacity 64 bytes (minCap)
+// and grows by appending further fixed buffers as writes demand.
+func NewBytesPipe() *BytesPipe {
+	bp := &BytesPipe{}
+	bp.buf = append(bp.buf, getBuffer(minCap))
+	bp.wait = sync.NewCond(&bp.mu)
+	return bp
+}
+
+// Write writes p to BytesPipe.
+// It can allocate new []byte slices in the process of writing.
+func (bp *BytesPipe) Write(p []byte) (int, error) {
+	bp.mu.Lock()
+
+	written := 0
+loop0:
+	for {
+		if bp.closeErr != nil {
+			bp.mu.Unlock()
+			return written, ErrClosed
+		}
+
+		if len(bp.buf) == 0 {
+			bp.buf = append(bp.buf, getBuffer(64))
+		}
+		// get the last buffer
+		b := bp.buf[len(bp.buf)-1]
+
+		n, err := b.Write(p)
+		written += n
+		bp.bufLen += n
+
+		// errBufferFull is an error we expect to get if the buffer is full
+		if err != nil && err != errBufferFull {
+			bp.wait.Broadcast()
+			bp.mu.Unlock()
+			return written, err
+		}
+
+		// if there was enough room to write all then break
+		if len(p) == n {
+			break
+		}
+
+		// more data: write to the next slice
+		p = p[n:]
+
+		// make sure the buffer doesn't grow too big from this write
+		for bp.bufLen >= blockThreshold {
+			bp.wait.Wait()
+			if bp.closeErr != nil {
+				continue loop0
+			}
+		}
+
+		// add new byte slice to the buffers slice and continue writing
+		nextCap := b.Cap() * 2
+		if nextCap > maxCap {
+			nextCap = maxCap
+		}
+		bp.buf = append(bp.buf, getBuffer(nextCap))
+	}
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return written, nil
+}
+
+// CloseWithError causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) CloseWithError(err error) error {
+	bp.mu.Lock()
+	if err != nil {
+		bp.closeErr = err
+	} else {
+		bp.closeErr = io.EOF
+	}
+	bp.wait.Broadcast()
+	bp.mu.Unlock()
+	return nil
+}
+
+// Close causes further reads from a BytesPipe to return immediately.
+func (bp *BytesPipe) Close() error {
+	return bp.CloseWithError(nil)
+}
+
+// Read reads bytes from BytesPipe.
+// Data can be read only once.
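End to end, BytesPipe behaves like an unbounded in-memory pipe: writers append, readers drain each byte at most once, and Close delivers io.EOF (Read itself is defined just below). A producer/consumer sketch:

package main

import (
	"fmt"
	"io"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	bp := ioutils.NewBytesPipe()

	// Producer: write, then Close so the reader eventually sees io.EOF.
	go func() {
		bp.Write([]byte("hello "))
		bp.Write([]byte("world"))
		bp.Close()
	}()

	// Consumer: each byte is delivered at most once.
	out, err := io.ReadAll(bp)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // hello world
}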
+func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + if bp.bufLen == 0 { + if bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + bp.wait.Wait() + if bp.bufLen == 0 && bp.closeErr != nil { + err := bp.closeErr + bp.mu.Unlock() + return 0, err + } + } + + for bp.bufLen > 0 { + b := bp.buf[0] + read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error + n += read + bp.bufLen -= read + + if b.Len() == 0 { + // it's empty so return it to the pool and move to the next one + returnBuffer(b) + bp.buf[0] = nil + bp.buf = bp.buf[1:] + } + + if len(p) == read { + break + } + + p = p[read:] + } + + bp.wait.Broadcast() + bp.mu.Unlock() + return +} + +func returnBuffer(b *fixedBuffer) { + b.Reset() + bufPoolsLock.Lock() + pool := bufPools[b.Cap()] + bufPoolsLock.Unlock() + if pool != nil { + pool.Put(b) + } +} + +func getBuffer(size int) *fixedBuffer { + bufPoolsLock.Lock() + pool, ok := bufPools[size] + if !ok { + pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} + bufPools[size] = pool + } + bufPoolsLock.Unlock() + return pool.Get().(*fixedBuffer) +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go new file mode 100644 index 0000000000..534d66ac26 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/fswriters.go @@ -0,0 +1,162 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "io" + "io/ioutil" + "os" + "path/filepath" +) + +// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a +// temporary file and closing it atomically changes the temporary file to +// destination path. Writing and closing concurrently is not allowed. +func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { + f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) + if err != nil { + return nil, err + } + + abspath, err := filepath.Abs(filename) + if err != nil { + return nil, err + } + return &atomicFileWriter{ + f: f, + fn: abspath, + perm: perm, + }, nil +} + +// AtomicWriteFile atomically writes data to a file named by filename. +func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := NewAtomicFileWriter(filename, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + f.(*atomicFileWriter).writeErr = err + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type atomicFileWriter struct { + f *os.File + fn string + writeErr error + perm os.FileMode +} + +func (w *atomicFileWriter) Write(dt []byte) (int, error) { + n, err := w.f.Write(dt) + if err != nil { + w.writeErr = err + } + return n, err +} + +func (w *atomicFileWriter) Close() (retErr error) { + defer func() { + if retErr != nil || w.writeErr != nil { + os.Remove(w.f.Name()) + } + }() + if err := w.f.Sync(); err != nil { + w.f.Close() + return err + } + if err := w.f.Close(); err != nil { + return err + } + if err := os.Chmod(w.f.Name(), w.perm); err != nil { + return err + } + if w.writeErr == nil { + return os.Rename(w.f.Name(), w.fn) + } + return nil +} + +// AtomicWriteSet is used to atomically write a set +// of files and ensure they are visible at the same time. +// Must be committed to a new directory. 
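AtomicWriteFile above is the classic temp-file-then-rename pattern: data lands in a hidden temp file in the destination directory, which is synced, chmod'ed, and only then renamed over the destination, so readers never observe a partial file. A usage sketch writing to a throwaway path:

package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/docker/pkg/ioutils"
)

func main() {
	path := filepath.Join(os.TempDir(), "demo-config.json")

	// Readers of `path` see either the old content or the new content,
	// never a half-written file.
	if err := ioutils.AtomicWriteFile(path, []byte(`{"ok":true}`), 0o644); err != nil {
		panic(err)
	}

	data, _ := os.ReadFile(path)
	fmt.Println(string(data))
}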
+type AtomicWriteSet struct { + root string +} + +// NewAtomicWriteSet creates a new atomic write set to +// atomically create a set of files. The given directory +// is used as the base directory for storing files before +// commit. If no temporary directory is given the system +// default is used. +func NewAtomicWriteSet(tmpDir string) (*AtomicWriteSet, error) { + td, err := ioutil.TempDir(tmpDir, "write-set-") + if err != nil { + return nil, err + } + + return &AtomicWriteSet{ + root: td, + }, nil +} + +// WriteFile writes a file to the set, guaranteeing the file +// has been synced. +func (ws *AtomicWriteSet) WriteFile(filename string, data []byte, perm os.FileMode) error { + f, err := ws.FileWriter(filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, perm) + if err != nil { + return err + } + n, err := f.Write(data) + if err == nil && n < len(data) { + err = io.ErrShortWrite + } + if err1 := f.Close(); err == nil { + err = err1 + } + return err +} + +type syncFileCloser struct { + *os.File +} + +func (w syncFileCloser) Close() error { + err := w.File.Sync() + if err1 := w.File.Close(); err == nil { + err = err1 + } + return err +} + +// FileWriter opens a file writer inside the set. The file +// should be synced and closed before calling commit. +func (ws *AtomicWriteSet) FileWriter(name string, flag int, perm os.FileMode) (io.WriteCloser, error) { + f, err := os.OpenFile(filepath.Join(ws.root, name), flag, perm) + if err != nil { + return nil, err + } + return syncFileCloser{f}, nil +} + +// Cancel cancels the set and removes all temporary data +// created in the set. +func (ws *AtomicWriteSet) Cancel() error { + return os.RemoveAll(ws.root) +} + +// Commit moves all created files to the target directory. The +// target directory must not exist and the parent of the target +// directory must exist. +func (ws *AtomicWriteSet) Commit(target string) error { + return os.Rename(ws.root, target) +} + +// String returns the location the set is writing to. +func (ws *AtomicWriteSet) String() string { + return ws.root +} diff --git a/vendor/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/docker/docker/pkg/ioutils/readers.go new file mode 100644 index 0000000000..1f657bd3dc --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/ioutils/readers.go @@ -0,0 +1,157 @@ +package ioutils // import "github.com/docker/docker/pkg/ioutils" + +import ( + "context" + "crypto/sha256" + "encoding/hex" + "io" +) + +// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser +// It calls the given callback function when closed. It should be constructed +// with NewReadCloserWrapper +type ReadCloserWrapper struct { + io.Reader + closer func() error +} + +// Close calls back the passed closer function +func (r *ReadCloserWrapper) Close() error { + return r.closer() +} + +// NewReadCloserWrapper returns a new io.ReadCloser. +func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { + return &ReadCloserWrapper{ + Reader: r, + closer: closer, + } +} + +type readerErrWrapper struct { + reader io.Reader + closer func() +} + +func (r *readerErrWrapper) Read(p []byte) (int, error) { + n, err := r.reader.Read(p) + if err != nil { + r.closer() + } + return n, err +} + +// NewReaderErrWrapper returns a new io.Reader. +func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { + return &readerErrWrapper{ + reader: r, + closer: closer, + } +} + +// HashData returns the sha256 sum of src. 
+func HashData(src io.Reader) (string, error) {
+	h := sha256.New()
+	if _, err := io.Copy(h, src); err != nil {
+		return "", err
+	}
+	return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil
+}
+
+// OnEOFReader wraps an io.ReadCloser and a callback function; the function
+// runs once, at end of file or when the reader is closed.
+type OnEOFReader struct {
+	Rc io.ReadCloser
+	Fn func()
+}
+
+func (r *OnEOFReader) Read(p []byte) (n int, err error) {
+	n, err = r.Rc.Read(p)
+	if err == io.EOF {
+		r.runFunc()
+	}
+	return
+}
+
+// Close closes the file and runs the function.
+func (r *OnEOFReader) Close() error {
+	err := r.Rc.Close()
+	r.runFunc()
+	return err
+}
+
+func (r *OnEOFReader) runFunc() {
+	if fn := r.Fn; fn != nil {
+		fn()
+		r.Fn = nil
+	}
+}
+
+// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read
+// operations.
+type cancelReadCloser struct {
+	cancel func()
+	pR     *io.PipeReader // Stream to read from
+	pW     *io.PipeWriter
+}
+
+// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
+// context is cancelled. The returned io.ReadCloser must be closed when it is
+// no longer needed.
+func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser {
+	pR, pW := io.Pipe()
+
+	// Create a context used to signal when the pipe is closed
+	doneCtx, cancel := context.WithCancel(context.Background())
+
+	p := &cancelReadCloser{
+		cancel: cancel,
+		pR:     pR,
+		pW:     pW,
+	}
+
+	go func() {
+		_, err := io.Copy(pW, in)
+		select {
+		case <-ctx.Done():
+			// If the context was closed, p.closeWithError
+			// was already called. Calling it again would
+			// change the error that Read returns.
+		default:
+			p.closeWithError(err)
+		}
+		in.Close()
+	}()
+	go func() {
+		for {
+			select {
+			case <-ctx.Done():
+				p.closeWithError(ctx.Err())
+			case <-doneCtx.Done():
+				return
+			}
+		}
+	}()
+
+	return p
+}
+
+// Read wraps the Read method of the pipe that provides data from the wrapped
+// ReadCloser.
+func (p *cancelReadCloser) Read(buf []byte) (n int, err error) {
+	return p.pR.Read(buf)
+}
+
+// closeWithError closes the wrapper and its underlying reader. It will
+// cause future calls to Read to return err.
+func (p *cancelReadCloser) closeWithError(err error) {
+	p.pW.CloseWithError(err)
+	p.cancel()
+}
+
+// Close closes the wrapper and its underlying reader. It will cause
+// future calls to Read to return io.EOF.
+func (p *cancelReadCloser) Close() error {
+	p.closeWithError(io.EOF)
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
new file mode 100644
index 0000000000..dc894f9131
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_unix.go
@@ -0,0 +1,10 @@
+// +build !windows
+
+package ioutils // import "github.com/docker/docker/pkg/ioutils"
+
+import "io/ioutil"
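+
+// Editor's note: a sketch (not upstream code) of how NewCancelReadCloser from
+// readers.go above ties a read to a context deadline. Assumes a caller that
+// imports "context", "io/ioutil", "net/http", "time", and this ioutils package:
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	resp, err := http.Get("https://example.com/large-body")
+//	if err != nil {
+//		return err
+//	}
+//	body := ioutils.NewCancelReadCloser(ctx, resp.Body)
+//	defer body.Close()
+//	data, err := ioutil.ReadAll(body) // returns ctx.Err() if the deadline fires mid-read
+
+// TempDir on Unix systems is equivalent to ioutil.TempDir.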
+func TempDir(dir, prefix string) (string, error) {
+	return ioutil.TempDir(dir, prefix)
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
new file mode 100644
index 0000000000..ecaba2e36d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/temp_windows.go
@@ -0,0 +1,16 @@
+package ioutils // import "github.com/docker/docker/pkg/ioutils"
+
+import (
+	"io/ioutil"
+
+	"github.com/docker/docker/pkg/longpath"
+)
+
+// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format.
+func TempDir(dir, prefix string) (string, error) {
+	tempDir, err := ioutil.TempDir(dir, prefix)
+	if err != nil {
+		return "", err
+	}
+	return longpath.AddPrefix(tempDir), nil
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
new file mode 100644
index 0000000000..91b8d18266
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writeflusher.go
@@ -0,0 +1,92 @@
+package ioutils // import "github.com/docker/docker/pkg/ioutils"
+
+import (
+	"io"
+	"sync"
+)
+
+// WriteFlusher wraps the Write and Flush operation ensuring that every write
+// is a flush. In addition, the Close method can be called to intercept
+// Read/Write calls if the target's lifecycle has already ended.
+type WriteFlusher struct {
+	w           io.Writer
+	flusher     flusher
+	flushed     chan struct{}
+	flushedOnce sync.Once
+	closed      chan struct{}
+	closeLock   sync.Mutex
+}
+
+type flusher interface {
+	Flush()
+}
+
+var errWriteFlusherClosed = io.EOF
+
+func (wf *WriteFlusher) Write(b []byte) (n int, err error) {
+	select {
+	case <-wf.closed:
+		return 0, errWriteFlusherClosed
+	default:
+	}
+
+	n, err = wf.w.Write(b)
+	wf.Flush() // every write is a flush.
+	return n, err
+}
+
+// Flush the stream immediately.
+func (wf *WriteFlusher) Flush() {
+	select {
+	case <-wf.closed:
+		return
+	default:
+	}
+
+	wf.flushedOnce.Do(func() {
+		close(wf.flushed)
+	})
+	wf.flusher.Flush()
+}
+
+// Flushed reports whether Flush has been called at least once.
+func (wf *WriteFlusher) Flushed() bool {
+	// BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to
+	// be used to detect whether or not a response code has been issued.
+	// Another hook should be used instead.
+	var flushed bool
+	select {
+	case <-wf.flushed:
+		flushed = true
+	default:
+	}
+	return flushed
+}
+
+// Close closes the write flusher, disallowing any further writes to the
+// target. After the flusher is closed, all calls to write or flush will
+// result in an error.
+func (wf *WriteFlusher) Close() error {
+	wf.closeLock.Lock()
+	defer wf.closeLock.Unlock()
+
+	select {
+	case <-wf.closed:
+		return errWriteFlusherClosed
+	default:
+		close(wf.closed)
+	}
+	return nil
+}
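+
+// Editor's note: an illustrative sketch (not upstream code) of the typical use:
+// wrapping an http.ResponseWriter so every write is pushed to the client right
+// away, e.g. for streaming progress output. Assumes the caller imports "fmt",
+// "net/http", and this ioutils package:
+//
+//	func handler(w http.ResponseWriter, r *http.Request) {
+//		wf := ioutils.NewWriteFlusher(w) // most http.ResponseWriters implement Flush()
+//		defer wf.Close()
+//		fmt.Fprintln(wf, "step 1 done") // written and flushed in a single call
+//	}
+
+// NewWriteFlusher returns a new WriteFlusher.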
+func NewWriteFlusher(w io.Writer) *WriteFlusher {
+	var fl flusher
+	if f, ok := w.(flusher); ok {
+		fl = f
+	} else {
+		fl = &NopFlusher{}
+	}
+	return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})}
+}
diff --git a/vendor/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
new file mode 100644
index 0000000000..61c679497d
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/ioutils/writers.go
@@ -0,0 +1,66 @@
+package ioutils // import "github.com/docker/docker/pkg/ioutils"
+
+import "io"
+
+// NopWriter represents a type whose Write operation is a no-op.
+type NopWriter struct{}
+
+func (*NopWriter) Write(buf []byte) (int, error) {
+	return len(buf), nil
+}
+
+type nopWriteCloser struct {
+	io.Writer
+}
+
+func (w *nopWriteCloser) Close() error { return nil }
+
+// NopWriteCloser returns a nopWriteCloser.
+func NopWriteCloser(w io.Writer) io.WriteCloser {
+	return &nopWriteCloser{w}
+}
+
+// NopFlusher represents a type whose Flush operation is a no-op.
+type NopFlusher struct{}
+
+// Flush is a no-op.
+func (f *NopFlusher) Flush() {}
+
+type writeCloserWrapper struct {
+	io.Writer
+	closer func() error
+}
+
+func (r *writeCloserWrapper) Close() error {
+	return r.closer()
+}
+
+// NewWriteCloserWrapper returns a new io.WriteCloser.
+func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser {
+	return &writeCloserWrapper{
+		Writer: r,
+		closer: closer,
+	}
+}
+
+// WriteCounter wraps a concrete io.Writer and holds a count of the number
+// of bytes written to the writer during a "session".
+// This can be convenient when the write return value is masked
+// (e.g., json.Encoder.Encode())
+type WriteCounter struct {
+	Count  int64
+	Writer io.Writer
+}
+
+// NewWriteCounter returns a new WriteCounter.
+func NewWriteCounter(w io.Writer) *WriteCounter {
+	return &WriteCounter{
+		Writer: w,
+	}
+}
+
+func (wc *WriteCounter) Write(p []byte) (count int, err error) {
+	count, err = wc.Writer.Write(p)
+	wc.Count += int64(count)
+	return
+}
diff --git a/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
new file mode 100644
index 0000000000..cf8d04b1b2
--- /dev/null
+++ b/vendor/github.com/docker/docker/pkg/jsonmessage/jsonmessage.go
@@ -0,0 +1,283 @@
+package jsonmessage // import "github.com/docker/docker/pkg/jsonmessage"
+
+import (
+	"encoding/json"
+	"fmt"
+	"io"
+	"strings"
+	"time"
+
+	units "github.com/docker/go-units"
+	"github.com/moby/term"
+	"github.com/morikuni/aec"
+)
+
+// RFC3339NanoFixed is time.RFC3339Nano with nanoseconds padded using zeros to
+// ensure the formatted time is always the same number of characters.
+const RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
+
+// JSONError wraps a concrete Code and Message, `Code`
+// is an integer error code, `Message` is the error message.
+type JSONError struct {
+	Code    int    `json:"code,omitempty"`
+	Message string `json:"message,omitempty"`
+}
+
+func (e *JSONError) Error() string {
+	return e.Message
+}
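+
+// Editor's note: a small sketch (not upstream code) of WriteCounter from
+// writers.go above, which helps when an encoder hides the underlying write
+// count. Assumes the caller imports "encoding/json", "io/ioutil", and this
+// ioutils package:
+//
+//	wc := ioutils.NewWriteCounter(ioutil.Discard)
+//	if err := json.NewEncoder(wc).Encode(map[string]string{"k": "v"}); err != nil {
+//		return err
+//	}
+//	bytesWritten := wc.Count // total bytes the encoder produced
+
+// JSONProgress describes a Progress. terminalFd is the fd of the current terminal,
+// Start is the initial value for the operation. Current is the current status and
+// value of the progress made towards Total. Total is the end value describing when
+// we made 100% progress for an operation.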
+type JSONProgress struct {
+	terminalFd uintptr
+	Current    int64 `json:"current,omitempty"`
+	Total      int64 `json:"total,omitempty"`
+	Start      int64 `json:"start,omitempty"`
+	// If true, don't show xB/yB
+	HideCounts bool   `json:"hidecounts,omitempty"`
+	Units      string `json:"units,omitempty"`
+	nowFunc    func() time.Time
+	winSize    int
+}
+
+func (p *JSONProgress) String() string {
+	var (
+		width       = p.width()
+		pbBox       string
+		numbersBox  string
+		timeLeftBox string
+	)
+	if p.Current <= 0 && p.Total <= 0 {
+		return ""
+	}
+	if p.Total <= 0 {
+		switch p.Units {
+		case "":
+			current := units.HumanSize(float64(p.Current))
+			return fmt.Sprintf("%8v", current)
+		default:
+			return fmt.Sprintf("%d %s", p.Current, p.Units)
+		}
+	}
+
+	percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
+	if percentage > 50 {
+		percentage = 50
+	}
+	if width > 110 {
+		// this number can't be negative gh#7136
+		numSpaces := 0
+		if 50-percentage > 0 {
+			numSpaces = 50 - percentage
+		}
+		pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
+	}
+
+	switch {
+	case p.HideCounts:
+	case p.Units == "": // no units, use bytes
+		current := units.HumanSize(float64(p.Current))
+		total := units.HumanSize(float64(p.Total))
+
+		numbersBox = fmt.Sprintf("%8v/%v", current, total)
+
+		if p.Current > p.Total {
+			// remove total display if the reported current is wonky.
+			numbersBox = fmt.Sprintf("%8v", current)
+		}
+	default:
+		numbersBox = fmt.Sprintf("%d/%d %s", p.Current, p.Total, p.Units)
+
+		if p.Current > p.Total {
+			// remove total display if the reported current is wonky.
+			numbersBox = fmt.Sprintf("%d %s", p.Current, p.Units)
+		}
+	}
+
+	if p.Current > 0 && p.Start > 0 && percentage < 50 {
+		fromStart := p.now().Sub(time.Unix(p.Start, 0))
+		perEntry := fromStart / time.Duration(p.Current)
+		left := time.Duration(p.Total-p.Current) * perEntry
+		left = (left / time.Second) * time.Second
+
+		if width > 50 {
+			timeLeftBox = " " + left.String()
+		}
+	}
+	return pbBox + numbersBox + timeLeftBox
+}
+
+// shim for testing
+func (p *JSONProgress) now() time.Time {
+	if p.nowFunc == nil {
+		p.nowFunc = func() time.Time {
+			return time.Now().UTC()
+		}
+	}
+	return p.nowFunc()
+}
+
+// shim for testing
+func (p *JSONProgress) width() int {
+	if p.winSize != 0 {
+		return p.winSize
+	}
+	ws, err := term.GetWinsize(p.terminalFd)
+	if err == nil {
+		return int(ws.Width)
+	}
+	return 200
+}
+
+// JSONMessage defines a message struct. It describes
+// the created time, where it comes from, the status, and the ID of the
+// message. It's used for docker events.
+type JSONMessage struct {
+	Stream          string        `json:"stream,omitempty"`
+	Status          string        `json:"status,omitempty"`
+	Progress        *JSONProgress `json:"progressDetail,omitempty"`
+	ProgressMessage string        `json:"progress,omitempty"` // deprecated
+	ID              string        `json:"id,omitempty"`
+	From            string        `json:"from,omitempty"`
+	Time            int64         `json:"time,omitempty"`
+	TimeNano        int64         `json:"timeNano,omitempty"`
+	Error           *JSONError    `json:"errorDetail,omitempty"`
+	ErrorMessage    string        `json:"error,omitempty"` // deprecated
+	// Aux contains out-of-band data, such as digests for push signing and image id after building.
+ Aux *json.RawMessage `json:"aux,omitempty"` +} + +func clearLine(out io.Writer) { + eraseMode := aec.EraseModes.All + cl := aec.EraseLine(eraseMode) + fmt.Fprint(out, cl) +} + +func cursorUp(out io.Writer, l uint) { + fmt.Fprint(out, aec.Up(l)) +} + +func cursorDown(out io.Writer, l uint) { + fmt.Fprint(out, aec.Down(l)) +} + +// Display displays the JSONMessage to `out`. If `isTerminal` is true, it will erase the +// entire current line when displaying the progressbar. +func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { + if jm.Error != nil { + if jm.Error.Code == 401 { + return fmt.Errorf("authentication is required") + } + return jm.Error + } + var endl string + if isTerminal && jm.Stream == "" && jm.Progress != nil { + clearLine(out) + endl = "\r" + fmt.Fprint(out, endl) + } else if jm.Progress != nil && jm.Progress.String() != "" { // disable progressbar in non-terminal + return nil + } + if jm.TimeNano != 0 { + fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(RFC3339NanoFixed)) + } else if jm.Time != 0 { + fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(RFC3339NanoFixed)) + } + if jm.ID != "" { + fmt.Fprintf(out, "%s: ", jm.ID) + } + if jm.From != "" { + fmt.Fprintf(out, "(from %s) ", jm.From) + } + if jm.Progress != nil && isTerminal { + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) + } else if jm.ProgressMessage != "" { // deprecated + fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) + } else if jm.Stream != "" { + fmt.Fprintf(out, "%s%s", jm.Stream, endl) + } else { + fmt.Fprintf(out, "%s%s\n", jm.Status, endl) + } + return nil +} + +// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` +// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of +// each line and move the cursor while displaying. +func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(JSONMessage)) error { + var ( + dec = json.NewDecoder(in) + ids = make(map[string]uint) + ) + + for { + var diff uint + var jm JSONMessage + if err := dec.Decode(&jm); err != nil { + if err == io.EOF { + break + } + return err + } + + if jm.Aux != nil { + if auxCallback != nil { + auxCallback(jm) + } + continue + } + + if jm.Progress != nil { + jm.Progress.terminalFd = terminalFd + } + if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { + line, ok := ids[jm.ID] + if !ok { + // NOTE: This approach of using len(id) to + // figure out the number of lines of history + // only works as long as we clear the history + // when we output something that's not + // accounted for in the map, such as a line + // with no ID. + line = uint(len(ids)) + ids[jm.ID] = line + if isTerminal { + fmt.Fprintf(out, "\n") + } + } + diff = uint(len(ids)) - line + if isTerminal { + cursorUp(out, diff) + } + } else { + // When outputting something that isn't progress + // output, clear the history of previous lines. We + // don't want progress entries from some previous + // operation to be updated (for example, pull -a + // with multiple tags). 
+ ids = make(map[string]uint) + } + err := jm.Display(out, isTerminal) + if jm.ID != "" && isTerminal { + cursorDown(out, diff) + } + if err != nil { + return err + } + } + return nil +} + +type stream interface { + io.Writer + FD() uintptr + IsTerminal() bool +} + +// DisplayJSONMessagesToStream prints json messages to the output stream +func DisplayJSONMessagesToStream(in io.Reader, stream stream, auxCallback func(JSONMessage)) error { + return DisplayJSONMessagesStream(in, stream, stream.FD(), stream.IsTerminal(), auxCallback) +} diff --git a/vendor/github.com/docker/docker/pkg/longpath/longpath.go b/vendor/github.com/docker/docker/pkg/longpath/longpath.go new file mode 100644 index 0000000000..4177affba2 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/longpath/longpath.go @@ -0,0 +1,26 @@ +// longpath introduces some constants and helper functions for handling long paths +// in Windows, which are expected to be prepended with `\\?\` and followed by either +// a drive letter, a UNC server\share, or a volume identifier. + +package longpath // import "github.com/docker/docker/pkg/longpath" + +import ( + "strings" +) + +// Prefix is the longpath prefix for Windows file paths. +const Prefix = `\\?\` + +// AddPrefix will add the Windows long path prefix to the path provided if +// it does not already have it. +func AddPrefix(path string) string { + if !strings.HasPrefix(path, Prefix) { + if strings.HasPrefix(path, `\\`) { + // This is a UNC path, so we need to add 'UNC' to the path as well. + path = Prefix + `UNC` + path[1:] + } else { + path = Prefix + path + } + } + return path +} diff --git a/vendor/github.com/docker/docker/pkg/stringid/README.md b/vendor/github.com/docker/docker/pkg/stringid/README.md new file mode 100644 index 0000000000..37a5098fd9 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringid/README.md @@ -0,0 +1 @@ +This package provides helper functions for dealing with string identifiers diff --git a/vendor/github.com/docker/docker/pkg/stringid/stringid.go b/vendor/github.com/docker/docker/pkg/stringid/stringid.go new file mode 100644 index 0000000000..5fe071d628 --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/stringid/stringid.go @@ -0,0 +1,63 @@ +// Package stringid provides helper functions for dealing with string identifiers +package stringid // import "github.com/docker/docker/pkg/stringid" + +import ( + "crypto/rand" + "encoding/hex" + "fmt" + "regexp" + "strconv" + "strings" +) + +const shortLen = 12 + +var ( + validShortID = regexp.MustCompile("^[a-f0-9]{12}$") + validHex = regexp.MustCompile(`^[a-f0-9]{64}$`) +) + +// IsShortID determines if an arbitrary string *looks like* a short ID. +func IsShortID(id string) bool { + return validShortID.MatchString(id) +} + +// TruncateID returns a shorthand version of a string identifier for convenience. +// A collision with other shorthands is very unlikely, but possible. +// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller +// will need to use a longer prefix, or the full-length Id. +func TruncateID(id string) string { + if i := strings.IndexRune(id, ':'); i >= 0 { + id = id[i+1:] + } + if len(id) > shortLen { + id = id[:shortLen] + } + return id +} + +// GenerateRandomID returns a unique id. 
+func GenerateRandomID() string {
+	b := make([]byte, 32)
+	for {
+		if _, err := rand.Read(b); err != nil {
+			panic(err) // This shouldn't happen
+		}
+		id := hex.EncodeToString(b)
+		// if we try to parse the truncated form as an int and we don't have
+		// an error then the value is all numeric and causes issues when
+		// used as a hostname. ref #3869
+		if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil {
+			continue
+		}
+		return id
+	}
+}
+
+// ValidateID checks whether an ID string is a valid image ID.
+func ValidateID(id string) error {
+	if ok := validHex.MatchString(id); !ok {
+		return fmt.Errorf("image ID %q is invalid", id)
+	}
+	return nil
+}
diff --git a/vendor/github.com/docker/docker/registry/auth.go b/vendor/github.com/docker/docker/registry/auth.go
new file mode 100644
index 0000000000..2d0ecde2d4
--- /dev/null
+++ b/vendor/github.com/docker/docker/registry/auth.go
@@ -0,0 +1,247 @@
+package registry // import "github.com/docker/docker/registry"
+
+import (
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+
+	"github.com/docker/distribution/registry/client/auth"
+	"github.com/docker/distribution/registry/client/auth/challenge"
+	"github.com/docker/distribution/registry/client/transport"
+	"github.com/docker/docker/api/types"
+	registrytypes "github.com/docker/docker/api/types/registry"
+	"github.com/pkg/errors"
+	"github.com/sirupsen/logrus"
+)
+
+const (
+	// AuthClientID is the ClientID used for the token server
+	AuthClientID = "docker"
+)
+
+type loginCredentialStore struct {
+	authConfig *types.AuthConfig
+}
+
+func (lcs loginCredentialStore) Basic(*url.URL) (string, string) {
+	return lcs.authConfig.Username, lcs.authConfig.Password
+}
+
+func (lcs loginCredentialStore) RefreshToken(*url.URL, string) string {
+	return lcs.authConfig.IdentityToken
+}
+
+func (lcs loginCredentialStore) SetRefreshToken(u *url.URL, service, token string) {
+	lcs.authConfig.IdentityToken = token
+}
+
+type staticCredentialStore struct {
+	auth *types.AuthConfig
+}
+
+// NewStaticCredentialStore returns a credential store
+// which always returns the same credential values.
+func NewStaticCredentialStore(auth *types.AuthConfig) auth.CredentialStore {
+	return staticCredentialStore{
+		auth: auth,
+	}
+}
+
+func (scs staticCredentialStore) Basic(*url.URL) (string, string) {
+	if scs.auth == nil {
+		return "", ""
+	}
+	return scs.auth.Username, scs.auth.Password
+}
+
+func (scs staticCredentialStore) RefreshToken(*url.URL, string) string {
+	if scs.auth == nil {
+		return ""
+	}
+	return scs.auth.IdentityToken
+}
+
+func (scs staticCredentialStore) SetRefreshToken(*url.URL, string, string) {
+}
+
+type fallbackError struct {
+	err error
+}
+
+func (err fallbackError) Error() string {
+	return err.err.Error()
+}
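+
+// Editor's note: an illustrative sketch (not upstream code) of how a static
+// credential store is typically consumed; the wiring mirrors what
+// v2AuthHTTPClient below does. Assumes the caller imports the docker "types"
+// and distribution "auth" packages used in this file:
+//
+//	cfg := &types.AuthConfig{Username: "user", Password: "secret"}
+//	creds := registry.NewStaticCredentialStore(cfg)
+//	// creds can then be handed to auth.NewTokenHandlerWithOptions and
+//	// auth.NewBasicHandler to build an authorizing http.Client.
+
+// loginV2 tries to login to the v2 registry server. The given registry
+// endpoint will be pinged to get authorization challenges. These challenges
+// will be used to authenticate against the registry to validate credentials.
+func loginV2(authConfig *types.AuthConfig, endpoint APIEndpoint, userAgent string) (string, string, error) {
+	var (
+		endpointStr   = strings.TrimRight(endpoint.URL.String(), "/") + "/v2/"
+		modifiers     = Headers(userAgent, nil)
+		authTransport = transport.NewTransport(NewTransport(endpoint.TLSConfig), modifiers...)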
+		credentialAuthConfig = *authConfig
+		creds                = loginCredentialStore{authConfig: &credentialAuthConfig}
+	)
+
+	logrus.Debugf("attempting v2 login to registry endpoint %s", endpointStr)
+
+	loginClient, foundV2, err := v2AuthHTTPClient(endpoint.URL, authTransport, modifiers, creds, nil)
+	if err != nil {
+		return "", "", err
+	}
+
+	req, err := http.NewRequest(http.MethodGet, endpointStr, nil)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return "", "", err
+	}
+
+	resp, err := loginClient.Do(req)
+	if err != nil {
+		err = translateV2AuthError(err)
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+
+		return "", "", err
+	}
+	defer resp.Body.Close()
+
+	if resp.StatusCode == http.StatusOK {
+		return "Login Succeeded", credentialAuthConfig.IdentityToken, nil
+	}
+
+	// TODO(dmcgowan): Attempt to further interpret result, status code and error code string
+	err = errors.Errorf("login attempt to %s failed with status: %d %s", endpointStr, resp.StatusCode, http.StatusText(resp.StatusCode))
+	if !foundV2 {
+		err = fallbackError{err: err}
+	}
+	return "", "", err
+}
+
+func v2AuthHTTPClient(endpoint *url.URL, authTransport http.RoundTripper, modifiers []transport.RequestModifier, creds auth.CredentialStore, scopes []auth.Scope) (*http.Client, bool, error) {
+	challengeManager, foundV2, err := PingV2Registry(endpoint, authTransport)
+	if err != nil {
+		if !foundV2 {
+			err = fallbackError{err: err}
+		}
+		return nil, foundV2, err
+	}
+
+	tokenHandlerOptions := auth.TokenHandlerOptions{
+		Transport:     authTransport,
+		Credentials:   creds,
+		OfflineAccess: true,
+		ClientID:      AuthClientID,
+		Scopes:        scopes,
+	}
+	tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
+	basicHandler := auth.NewBasicHandler(creds)
+	modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
+	tr := transport.NewTransport(authTransport, modifiers...)
+
+	return &http.Client{
+		Transport: tr,
+		Timeout:   15 * time.Second,
+	}, foundV2, nil
+
+}
+
+// ConvertToHostname converts a registry URL with an http:// or https://
+// prefix into just a hostname.
+func ConvertToHostname(url string) string {
+	stripped := url
+	if strings.HasPrefix(url, "http://") {
+		stripped = strings.TrimPrefix(url, "http://")
+	} else if strings.HasPrefix(url, "https://") {
+		stripped = strings.TrimPrefix(url, "https://")
+	}
+
+	nameParts := strings.SplitN(stripped, "/", 2)
+
+	return nameParts[0]
+}
+
+// ResolveAuthConfig matches an auth configuration to a server address or a URL
+func ResolveAuthConfig(authConfigs map[string]types.AuthConfig, index *registrytypes.IndexInfo) types.AuthConfig {
+	configKey := GetAuthConfigKey(index)
+	// First try the happy case
+	if c, found := authConfigs[configKey]; found || index.Official {
+		return c
+	}
+
+	// Maybe they have a legacy config file, we will iterate the keys converting
+	// them to the new format and testing
+	for registry, ac := range authConfigs {
+		if configKey == ConvertToHostname(registry) {
+			return ac
+		}
+	}
+
+	// When all else fails, return an empty auth config
+	return types.AuthConfig{}
+}
+
+// PingResponseError is used when the response from a ping
+// was received but invalid.
+type PingResponseError struct {
+	Err error
+}
+
+func (err PingResponseError) Error() string {
+	return err.Err.Error()
+}
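+
+// Editor's note: a small sketch (not upstream code) of ConvertToHostname
+// above, which maps a configured registry URL to the lookup key that
+// ResolveAuthConfig compares against legacy config-file entries:
+//
+//	key := registry.ConvertToHostname("https://myregistry.example.com/v2/")
+//	// key == "myregistry.example.com"
+
+// PingV2Registry attempts to ping a v2 registry and on success returns a
+// challenge manager for the supported authentication types and
+// whether v2 was confirmed by the response.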
If a response is received but +// cannot be interpreted a PingResponseError will be returned. +func PingV2Registry(endpoint *url.URL, transport http.RoundTripper) (challenge.Manager, bool, error) { + var ( + foundV2 = false + v2Version = auth.APIVersion{ + Type: "registry", + Version: "2.0", + } + ) + + pingClient := &http.Client{ + Transport: transport, + Timeout: 15 * time.Second, + } + endpointStr := strings.TrimRight(endpoint.String(), "/") + "/v2/" + req, err := http.NewRequest(http.MethodGet, endpointStr, nil) + if err != nil { + return nil, false, err + } + resp, err := pingClient.Do(req) + if err != nil { + return nil, false, err + } + defer resp.Body.Close() + + versions := auth.APIVersions(resp, DefaultRegistryVersionHeader) + for _, pingVersion := range versions { + if pingVersion == v2Version { + // The version header indicates we're definitely + // talking to a v2 registry. So don't allow future + // fallbacks to the v1 protocol. + + foundV2 = true + break + } + } + + challengeManager := challenge.NewSimpleManager() + if err := challengeManager.AddResponse(resp); err != nil { + return nil, foundV2, PingResponseError{ + Err: err, + } + } + + return challengeManager, foundV2, nil +} diff --git a/vendor/github.com/docker/docker/registry/config.go b/vendor/github.com/docker/docker/registry/config.go new file mode 100644 index 0000000000..54b83fa40a --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config.go @@ -0,0 +1,433 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "fmt" + "net" + "net/url" + "regexp" + "strconv" + "strings" + + "github.com/docker/distribution/reference" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// ServiceOptions holds command line options. +type ServiceOptions struct { + AllowNondistributableArtifacts []string `json:"allow-nondistributable-artifacts,omitempty"` + Mirrors []string `json:"registry-mirrors,omitempty"` + InsecureRegistries []string `json:"insecure-registries,omitempty"` +} + +// serviceConfig holds daemon configuration for the registry service. 
+type serviceConfig struct { + registrytypes.ServiceConfig +} + +const ( + // DefaultNamespace is the default namespace + DefaultNamespace = "docker.io" + // DefaultRegistryVersionHeader is the name of the default HTTP header + // that carries Registry version info + DefaultRegistryVersionHeader = "Docker-Distribution-Api-Version" + + // IndexHostname is the index hostname + IndexHostname = "index.docker.io" + // IndexServer is used for user auth and image search + IndexServer = "https://" + IndexHostname + "/v1/" + // IndexName is the name of the index + IndexName = "docker.io" +) + +var ( + // DefaultV2Registry is the URI of the default v2 registry + DefaultV2Registry = &url.URL{ + Scheme: "https", + Host: "registry-1.docker.io", + } + + // ErrInvalidRepositoryName is an error returned if the repository name did + // not have the correct form + ErrInvalidRepositoryName = errors.New("Invalid repository name (ex: \"registry.domain.tld/myrepos\")") + + emptyServiceConfig, _ = newServiceConfig(ServiceOptions{}) + validHostPortRegex = regexp.MustCompile(`^` + reference.DomainRegexp.String() + `$`) + + // for mocking in unit tests + lookupIP = net.LookupIP +) + +// newServiceConfig returns a new instance of ServiceConfig +func newServiceConfig(options ServiceOptions) (*serviceConfig, error) { + config := &serviceConfig{ + ServiceConfig: registrytypes.ServiceConfig{ + InsecureRegistryCIDRs: make([]*registrytypes.NetIPNet, 0), + IndexConfigs: make(map[string]*registrytypes.IndexInfo), + // Hack: Bypass setting the mirrors to IndexConfigs since they are going away + // and Mirrors are only for the official registry anyways. + }, + } + if err := config.LoadAllowNondistributableArtifacts(options.AllowNondistributableArtifacts); err != nil { + return nil, err + } + if err := config.LoadMirrors(options.Mirrors); err != nil { + return nil, err + } + if err := config.LoadInsecureRegistries(options.InsecureRegistries); err != nil { + return nil, err + } + + return config, nil +} + +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries into config. +func (config *serviceConfig) LoadAllowNondistributableArtifacts(registries []string) error { + cidrs := map[string]*registrytypes.NetIPNet{} + hostnames := map[string]bool{} + + for _, r := range registries { + if _, err := ValidateIndexName(r); err != nil { + return err + } + if validateNoScheme(r) != nil { + return fmt.Errorf("allow-nondistributable-artifacts registry %s should not contain '://'", r) + } + + if _, ipnet, err := net.ParseCIDR(r); err == nil { + // Valid CIDR. + cidrs[ipnet.String()] = (*registrytypes.NetIPNet)(ipnet) + } else if err := validateHostPort(r); err == nil { + // Must be `host:port` if not CIDR. + hostnames[r] = true + } else { + return fmt.Errorf("allow-nondistributable-artifacts registry %s is not valid: %v", r, err) + } + } + + config.AllowNondistributableArtifactsCIDRs = make([]*(registrytypes.NetIPNet), 0) + for _, c := range cidrs { + config.AllowNondistributableArtifactsCIDRs = append(config.AllowNondistributableArtifactsCIDRs, c) + } + + config.AllowNondistributableArtifactsHostnames = make([]string, 0) + for h := range hostnames { + config.AllowNondistributableArtifactsHostnames = append(config.AllowNondistributableArtifactsHostnames, h) + } + + return nil +} + +// LoadMirrors loads mirrors to config, after removing duplicates. +// Returns an error if mirrors contains an invalid mirror. 
+func (config *serviceConfig) LoadMirrors(mirrors []string) error {
+	mMap := map[string]struct{}{}
+	unique := []string{}
+
+	for _, mirror := range mirrors {
+		m, err := ValidateMirror(mirror)
+		if err != nil {
+			return err
+		}
+		if _, exist := mMap[m]; !exist {
+			mMap[m] = struct{}{}
+			unique = append(unique, m)
+		}
+	}
+
+	config.Mirrors = unique
+
+	// Configure public registry since mirrors may have changed.
+	config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
+		Name:     IndexName,
+		Mirrors:  config.Mirrors,
+		Secure:   true,
+		Official: true,
+	}
+
+	return nil
+}
+
+// LoadInsecureRegistries loads insecure registries to config
+func (config *serviceConfig) LoadInsecureRegistries(registries []string) error {
+	// Localhost is by default considered as an insecure registry
+	// This is a stop-gap for people who are running a private registry on localhost (especially on Boot2docker).
+	//
+	// TODO: should we deprecate this once it is easier for people to set up a TLS registry or change
+	// daemon flags on boot2docker?
+	registries = append(registries, "127.0.0.0/8")
+
+	// Store original InsecureRegistryCIDRs and IndexConfigs
+	// Clean InsecureRegistryCIDRs and IndexConfigs in config, as passed registries has all insecure registry info.
+	originalCIDRs := config.ServiceConfig.InsecureRegistryCIDRs
+	originalIndexInfos := config.ServiceConfig.IndexConfigs
+
+	config.ServiceConfig.InsecureRegistryCIDRs = make([]*registrytypes.NetIPNet, 0)
+	config.ServiceConfig.IndexConfigs = make(map[string]*registrytypes.IndexInfo)
+
+skip:
+	for _, r := range registries {
+		// validate insecure registry
+		if _, err := ValidateIndexName(r); err != nil {
+			// before returning err, roll back to original data
+			config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
+			config.ServiceConfig.IndexConfigs = originalIndexInfos
+			return err
+		}
+		if strings.HasPrefix(strings.ToLower(r), "http://") {
+			logrus.Warnf("insecure registry %s should not contain 'http://' and 'http://' has been removed from the insecure registry config", r)
+			r = r[7:]
+		} else if strings.HasPrefix(strings.ToLower(r), "https://") {
+			logrus.Warnf("insecure registry %s should not contain 'https://' and 'https://' has been removed from the insecure registry config", r)
+			r = r[8:]
+		} else if validateNoScheme(r) != nil {
+			// Insecure registry should not contain '://'
+			// before returning err, roll back to original data
+			config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
+			config.ServiceConfig.IndexConfigs = originalIndexInfos
+			return fmt.Errorf("insecure registry %s should not contain '://'", r)
+		}
+		// Check if CIDR was passed to --insecure-registry
+		_, ipnet, err := net.ParseCIDR(r)
+		if err == nil {
+			// Valid CIDR. If ipnet is already in config.InsecureRegistryCIDRs, skip.
+			data := (*registrytypes.NetIPNet)(ipnet)
+			for _, value := range config.InsecureRegistryCIDRs {
+				if value.IP.String() == data.IP.String() && value.Mask.String() == data.Mask.String() {
+					continue skip
+				}
+			}
+			// ipnet is not found, add it in config.InsecureRegistryCIDRs
+			config.InsecureRegistryCIDRs = append(config.InsecureRegistryCIDRs, data)
+
+		} else {
+			if err := validateHostPort(r); err != nil {
+				config.ServiceConfig.InsecureRegistryCIDRs = originalCIDRs
+				config.ServiceConfig.IndexConfigs = originalIndexInfos
+				return fmt.Errorf("insecure registry %s is not valid: %v", r, err)
+
+			}
+			// Assume `host:port` if not CIDR.
+			config.IndexConfigs[r] = &registrytypes.IndexInfo{
+				Name:     r,
+				Mirrors:  make([]string, 0),
+				Secure:   false,
+				Official: false,
+			}
+		}
+	}
+
+	// Configure public registry.
+	config.IndexConfigs[IndexName] = &registrytypes.IndexInfo{
+		Name:     IndexName,
+		Mirrors:  config.Mirrors,
+		Secure:   true,
+		Official: true,
+	}
+
+	return nil
+}
+
+// allowNondistributableArtifacts returns true if the provided hostname is part of the list of registries
+// that allow push of nondistributable artifacts.
+//
+// The list can contain elements with CIDR notation to specify a whole subnet. If the subnet contains an IP
+// of the registry specified by hostname, true is returned.
+//
+// hostname should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name
+// or an IP address. If it is a domain name, then it will be resolved to IP addresses for matching. If
+// resolution fails, CIDR matching is not performed.
+func allowNondistributableArtifacts(config *serviceConfig, hostname string) bool {
+	for _, h := range config.AllowNondistributableArtifactsHostnames {
+		if h == hostname {
+			return true
+		}
+	}
+
+	return isCIDRMatch(config.AllowNondistributableArtifactsCIDRs, hostname)
+}
+
+// isSecureIndex returns false if the provided indexName is part of the list of insecure registries
+// Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs.
+//
+// The list of insecure registries can contain an element with CIDR notation to specify a whole subnet.
+// If the subnet contains one of the IPs of the registry specified by indexName, the latter is considered
+// insecure.
+//
+// indexName should be a URL.Host (`host:port` or `host`) where the `host` part can be either a domain name
+// or an IP address. If it is a domain name, then it will be resolved in order to check if the IP is contained
+// in a subnet. If the resolving is not successful, isSecureIndex will only try to match hostname to any element
+// of insecureRegistries.
+func isSecureIndex(config *serviceConfig, indexName string) bool {
+	// Check for configured index, first. This is needed in case isSecureIndex
+	// is called from anything besides newIndexInfo, in order to honor per-index configurations.
+	if index, ok := config.IndexConfigs[indexName]; ok {
+		return index.Secure
+	}
+
+	return !isCIDRMatch(config.InsecureRegistryCIDRs, indexName)
+}
+
+// isCIDRMatch returns true if URLHost matches an element of cidrs. URLHost is a URL.Host (`host:port` or `host`)
+// where the `host` part can be either a domain name or an IP address. If it is a domain name, then it will be
+// resolved to IP addresses for matching. If resolution fails, false is returned.
+func isCIDRMatch(cidrs []*registrytypes.NetIPNet, URLHost string) bool {
+	host, _, err := net.SplitHostPort(URLHost)
+	if err != nil {
+		// Assume URLHost is of the form `host` without the port and go on.
+		host = URLHost
+	}
+
+	addrs, err := lookupIP(host)
+	if err != nil {
+		ip := net.ParseIP(host)
+		if ip != nil {
+			addrs = []net.IP{ip}
+		}
+
+		// if ip == nil, then `host` is neither an IP nor it could be looked up,
+		// either because the index is unreachable, or because the index is behind an HTTP proxy.
+		// So, len(addrs) == 0 and we're not aborting.
+	}
+
+	// Try CIDR notation only if addrs has any elements, i.e. if `host`'s IP could be determined.
+	for _, addr := range addrs {
+		for _, ipnet := range cidrs {
+			// check if the addr falls in the subnet
+			if (*net.IPNet)(ipnet).Contains(addr) {
+				return true
+			}
+		}
+	}
+
+	return false
+}
+
+// ValidateMirror validates an HTTP(S) registry mirror
+func ValidateMirror(val string) (string, error) {
+	uri, err := url.Parse(val)
+	if err != nil {
+		return "", fmt.Errorf("invalid mirror: %q is not a valid URI", val)
+	}
+	if uri.Scheme != "http" && uri.Scheme != "https" {
+		return "", fmt.Errorf("invalid mirror: unsupported scheme %q in %q", uri.Scheme, uri)
+	}
+	if (uri.Path != "" && uri.Path != "/") || uri.RawQuery != "" || uri.Fragment != "" {
+		return "", fmt.Errorf("invalid mirror: path, query, or fragment at end of the URI %q", uri)
+	}
+	if uri.User != nil {
+		// strip password from output
+		uri.User = url.UserPassword(uri.User.Username(), "xxxxx")
+		return "", fmt.Errorf("invalid mirror: username/password not allowed in URI %q", uri)
+	}
+	return strings.TrimSuffix(val, "/") + "/", nil
+}
+
+// ValidateIndexName validates an index name.
+func ValidateIndexName(val string) (string, error) {
+	// TODO: upstream this to check to reference package
+	if val == "index.docker.io" {
+		val = "docker.io"
+	}
+	if strings.HasPrefix(val, "-") || strings.HasSuffix(val, "-") {
+		return "", fmt.Errorf("invalid index name (%s). Cannot begin or end with a hyphen", val)
+	}
+	return val, nil
+}
+
+func validateNoScheme(reposName string) error {
+	if strings.Contains(reposName, "://") {
+		// It cannot contain a scheme!
+		return ErrInvalidRepositoryName
+	}
+	return nil
+}
+
+func validateHostPort(s string) error {
+	// Split host and port; if s cannot be split, assume host only
+	host, port, err := net.SplitHostPort(s)
+	if err != nil {
+		host = s
+		port = ""
+	}
+	// If matching against the `host:port` pattern fails,
+	// it might be `IPv6:port`, which will be captured by net.ParseIP(host)
+	if !validHostPortRegex.MatchString(s) && net.ParseIP(host) == nil {
+		return fmt.Errorf("invalid host %q", host)
+	}
+	if port != "" {
+		v, err := strconv.Atoi(port)
+		if err != nil {
+			return err
+		}
+		if v < 0 || v > 65535 {
+			return fmt.Errorf("invalid port %q", port)
+		}
+	}
+	return nil
+}
+
+// newIndexInfo returns IndexInfo configuration from indexName
+func newIndexInfo(config *serviceConfig, indexName string) (*registrytypes.IndexInfo, error) {
+	var err error
+	indexName, err = ValidateIndexName(indexName)
+	if err != nil {
+		return nil, err
+	}
+
+	// Return any configured index info, first.
+	if index, ok := config.IndexConfigs[indexName]; ok {
+		return index, nil
+	}
+
+	// Construct a non-configured index info.
+	index := &registrytypes.IndexInfo{
+		Name:     indexName,
+		Mirrors:  make([]string, 0),
+		Official: false,
+	}
+	index.Secure = isSecureIndex(config, indexName)
+	return index, nil
+}
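+
+// Editor's note: a brief sketch (not upstream code) of ValidateMirror's
+// normalization above: valid mirrors are returned with a trailing slash,
+// and anything that is not plain http(s) is rejected:
+//
+//	m, err := registry.ValidateMirror("https://mirror.example.com")
+//	// m == "https://mirror.example.com/", err == nil
+//	_, err = registry.ValidateMirror("ftp://mirror.example.com")
+//	// err != nil: unsupported scheme "ftp"
+
+// GetAuthConfigKey special-cases using the full index address of the official
+// index as the AuthConfig key, and uses the (host)name[:port] for private indexes.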
+func GetAuthConfigKey(index *registrytypes.IndexInfo) string { + if index.Official { + return IndexServer + } + return index.Name +} + +// newRepositoryInfo validates and breaks down a repository name into a RepositoryInfo +func newRepositoryInfo(config *serviceConfig, name reference.Named) (*RepositoryInfo, error) { + index, err := newIndexInfo(config, reference.Domain(name)) + if err != nil { + return nil, err + } + official := !strings.ContainsRune(reference.FamiliarName(name), '/') + + return &RepositoryInfo{ + Name: reference.TrimNamed(name), + Index: index, + Official: official, + }, nil +} + +// ParseRepositoryInfo performs the breakdown of a repository name into a RepositoryInfo, but +// lacks registry configuration. +func ParseRepositoryInfo(reposName reference.Named) (*RepositoryInfo, error) { + return newRepositoryInfo(emptyServiceConfig, reposName) +} + +// ParseSearchIndexInfo will use repository name to get back an indexInfo. +func ParseSearchIndexInfo(reposName string) (*registrytypes.IndexInfo, error) { + indexName, _ := splitReposSearchTerm(reposName) + + indexInfo, err := newIndexInfo(emptyServiceConfig, indexName) + if err != nil { + return nil, err + } + return indexInfo, nil +} diff --git a/vendor/github.com/docker/docker/registry/config_unix.go b/vendor/github.com/docker/docker/registry/config_unix.go new file mode 100644 index 0000000000..8ee8fedfc1 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_unix.go @@ -0,0 +1,31 @@ +// +build !windows + +package registry // import "github.com/docker/docker/registry" + +import ( + "path/filepath" + + "github.com/docker/docker/pkg/homedir" + "github.com/docker/docker/rootless" +) + +// CertsDir is the directory where certificates are stored +func CertsDir() string { + d := "/etc/docker/certs.d" + + if rootless.RunningWithRootlessKit() { + configHome, err := homedir.GetConfigHome() + if err == nil { + d = filepath.Join(configHome, "docker/certs.d") + } + } + return d +} + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:/index.docker.io/v1. Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return s +} diff --git a/vendor/github.com/docker/docker/registry/config_windows.go b/vendor/github.com/docker/docker/registry/config_windows.go new file mode 100644 index 0000000000..4ae1e07abf --- /dev/null +++ b/vendor/github.com/docker/docker/registry/config_windows.go @@ -0,0 +1,20 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "os" + "path/filepath" + "strings" +) + +// CertsDir is the directory where certificates are stored +func CertsDir() string { + return os.Getenv("programdata") + `\docker\certs.d` +} + +// cleanPath is used to ensure that a directory name is valid on the target +// platform. It will be passed in something *similar* to a URL such as +// https:\index.docker.io\v1. 
Not all platforms support directory names +// which contain those characters (such as : on Windows) +func cleanPath(s string) string { + return filepath.FromSlash(strings.Replace(s, ":", "", -1)) +} diff --git a/vendor/github.com/docker/docker/registry/endpoint_v1.go b/vendor/github.com/docker/docker/registry/endpoint_v1.go new file mode 100644 index 0000000000..db342d1412 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/endpoint_v1.go @@ -0,0 +1,195 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "crypto/tls" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + "strings" + + "github.com/docker/distribution/registry/client/transport" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/sirupsen/logrus" +) + +// V1Endpoint stores basic information about a V1 registry endpoint. +type V1Endpoint struct { + client *http.Client + URL *url.URL + IsSecure bool +} + +// NewV1Endpoint parses the given address to return a registry endpoint. +func NewV1Endpoint(index *registrytypes.IndexInfo, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + tlsConfig, err := newTLSConfig(index.Name, index.Secure) + if err != nil { + return nil, err + } + + endpoint, err := newV1EndpointFromStr(GetAuthConfigKey(index), tlsConfig, userAgent, metaHeaders) + if err != nil { + return nil, err + } + + if err := validateEndpoint(endpoint); err != nil { + return nil, err + } + + return endpoint, nil +} + +func validateEndpoint(endpoint *V1Endpoint) error { + logrus.Debugf("pinging registry endpoint %s", endpoint) + + // Try HTTPS ping to registry + endpoint.URL.Scheme = "https" + if _, err := endpoint.Ping(); err != nil { + if endpoint.IsSecure { + // If registry is secure and HTTPS failed, show user the error and tell them about `--insecure-registry` + // in case that's what they need. DO NOT accept unknown CA certificates, and DO NOT fallback to HTTP. + return fmt.Errorf("invalid registry endpoint %s: %v. If this private registry supports only HTTP or HTTPS with an unknown CA certificate, please add `--insecure-registry %s` to the daemon's arguments. In the case of HTTPS, if you have access to the registry's CA certificate, no need for the flag; simply place the CA certificate at /etc/docker/certs.d/%s/ca.crt", endpoint, err, endpoint.URL.Host, endpoint.URL.Host) + } + + // If registry is insecure and HTTPS failed, fallback to HTTP. + logrus.Debugf("Error from registry %q marked as insecure: %v. Insecurely falling back to HTTP", endpoint, err) + endpoint.URL.Scheme = "http" + + var err2 error + if _, err2 = endpoint.Ping(); err2 == nil { + return nil + } + + return fmt.Errorf("invalid registry endpoint %q. HTTPS attempt: %v. HTTP attempt: %v", endpoint, err, err2) + } + + return nil +} + +func newV1Endpoint(address url.URL, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) *V1Endpoint { + endpoint := &V1Endpoint{ + IsSecure: tlsConfig == nil || !tlsConfig.InsecureSkipVerify, + URL: new(url.URL), + } + + *endpoint.URL = address + + // TODO(tiborvass): make sure a ConnectTimeout transport is used + tr := NewTransport(tlsConfig) + endpoint.client = HTTPClient(transport.NewTransport(tr, Headers(userAgent, metaHeaders)...)) + return endpoint +} + +// trimV1Address trims the version off the address and returns the +// trimmed address or an error if there is a non-V1 version. 
+func trimV1Address(address string) (string, error) { + var ( + chunks []string + apiVersionStr string + ) + + if strings.HasSuffix(address, "/") { + address = address[:len(address)-1] + } + + chunks = strings.Split(address, "/") + apiVersionStr = chunks[len(chunks)-1] + if apiVersionStr == "v1" { + return strings.Join(chunks[:len(chunks)-1], "/"), nil + } + + for k, v := range apiVersions { + if k != APIVersion1 && apiVersionStr == v { + return "", fmt.Errorf("unsupported V1 version path %s", apiVersionStr) + } + } + + return address, nil +} + +func newV1EndpointFromStr(address string, tlsConfig *tls.Config, userAgent string, metaHeaders http.Header) (*V1Endpoint, error) { + if !strings.HasPrefix(address, "http://") && !strings.HasPrefix(address, "https://") { + address = "https://" + address + } + + address, err := trimV1Address(address) + if err != nil { + return nil, err + } + + uri, err := url.Parse(address) + if err != nil { + return nil, err + } + + endpoint := newV1Endpoint(*uri, tlsConfig, userAgent, metaHeaders) + + return endpoint, nil +} + +// Get the formatted URL for the root of this registry Endpoint +func (e *V1Endpoint) String() string { + return e.URL.String() + "/v1/" +} + +// Path returns a formatted string for the URL +// of this endpoint with the given path appended. +func (e *V1Endpoint) Path(path string) string { + return e.URL.String() + "/v1/" + path +} + +// Ping returns a PingResult which indicates whether the registry is standalone or not. +func (e *V1Endpoint) Ping() (PingResult, error) { + logrus.Debugf("attempting v1 ping for registry endpoint %s", e) + + if e.String() == IndexServer { + // Skip the check, we know this one is valid + // (and we never want to fallback to http in case of error) + return PingResult{Standalone: false}, nil + } + + req, err := http.NewRequest(http.MethodGet, e.Path("_ping"), nil) + if err != nil { + return PingResult{Standalone: false}, err + } + + resp, err := e.client.Do(req) + if err != nil { + return PingResult{Standalone: false}, err + } + + defer resp.Body.Close() + + jsonString, err := ioutil.ReadAll(resp.Body) + if err != nil { + return PingResult{Standalone: false}, fmt.Errorf("error while reading the http response: %s", err) + } + + // If the header is absent, we assume true for compatibility with earlier + // versions of the registry. default to true + info := PingResult{ + Standalone: true, + } + if err := json.Unmarshal(jsonString, &info); err != nil { + logrus.Debugf("Error unmarshaling the _ping PingResult: %s", err) + // don't stop here. Just assume sane defaults + } + if hdr := resp.Header.Get("X-Docker-Registry-Version"); hdr != "" { + logrus.Debugf("Registry version header: '%s'", hdr) + info.Version = hdr + } + logrus.Debugf("PingResult.Version: %q", info.Version) + + standalone := resp.Header.Get("X-Docker-Registry-Standalone") + logrus.Debugf("Registry standalone header: '%s'", standalone) + // Accepted values are "true" (case-insensitive) and "1". 
+ if strings.EqualFold(standalone, "true") || standalone == "1" { + info.Standalone = true + } else if len(standalone) > 0 { + // there is a header set, and it is not "true" or "1", so assume fails + info.Standalone = false + } + logrus.Debugf("PingResult.Standalone: %t", info.Standalone) + return info, nil +} diff --git a/vendor/github.com/docker/docker/registry/errors.go b/vendor/github.com/docker/docker/registry/errors.go new file mode 100644 index 0000000000..4906303efc --- /dev/null +++ b/vendor/github.com/docker/docker/registry/errors.go @@ -0,0 +1,23 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "net/url" + + "github.com/docker/distribution/registry/api/errcode" + "github.com/docker/docker/errdefs" +) + +func translateV2AuthError(err error) error { + switch e := err.(type) { + case *url.Error: + switch e2 := e.Err.(type) { + case errcode.Error: + switch e2.Code { + case errcode.ErrorCodeUnauthorized: + return errdefs.Unauthorized(err) + } + } + } + + return err +} diff --git a/vendor/github.com/docker/docker/registry/registry.go b/vendor/github.com/docker/docker/registry/registry.go new file mode 100644 index 0000000000..7a70bf28b5 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/registry.go @@ -0,0 +1,199 @@ +// Package registry contains client primitives to interact with a remote Docker registry. +package registry // import "github.com/docker/docker/registry" + +import ( + "crypto/tls" + "errors" + "fmt" + "io/ioutil" + "net" + "net/http" + "os" + "path/filepath" + "strings" + "time" + + "github.com/docker/distribution/registry/client/transport" + "github.com/docker/go-connections/tlsconfig" + "github.com/sirupsen/logrus" +) + +var ( + // ErrAlreadyExists is an error returned if an image being pushed + // already exists on the remote side + ErrAlreadyExists = errors.New("Image already exists") +) + +// HostCertsDir returns the config directory for a specific host +func HostCertsDir(hostname string) (string, error) { + certsDir := CertsDir() + + hostDir := filepath.Join(certsDir, cleanPath(hostname)) + + return hostDir, nil +} + +func newTLSConfig(hostname string, isSecure bool) (*tls.Config, error) { + // PreferredServerCipherSuites should have no effect + tlsConfig := tlsconfig.ServerDefault() + + tlsConfig.InsecureSkipVerify = !isSecure + + if isSecure && CertsDir() != "" { + hostDir, err := HostCertsDir(hostname) + if err != nil { + return nil, err + } + + logrus.Debugf("hostDir: %s", hostDir) + if err := ReadCertsDirectory(tlsConfig, hostDir); err != nil { + return nil, err + } + } + + return tlsConfig, nil +} + +func hasFile(files []os.FileInfo, name string) bool { + for _, f := range files { + if f.Name() == name { + return true + } + } + return false +} + +// ReadCertsDirectory reads the directory for TLS certificates +// including roots and certificate pairs and updates the +// provided TLS configuration. 
+func ReadCertsDirectory(tlsConfig *tls.Config, directory string) error { + fs, err := ioutil.ReadDir(directory) + if err != nil && !os.IsNotExist(err) { + return err + } + + for _, f := range fs { + if strings.HasSuffix(f.Name(), ".crt") { + if tlsConfig.RootCAs == nil { + systemPool, err := tlsconfig.SystemCertPool() + if err != nil { + return fmt.Errorf("unable to get system cert pool: %v", err) + } + tlsConfig.RootCAs = systemPool + } + logrus.Debugf("crt: %s", filepath.Join(directory, f.Name())) + data, err := ioutil.ReadFile(filepath.Join(directory, f.Name())) + if err != nil { + return err + } + tlsConfig.RootCAs.AppendCertsFromPEM(data) + } + if strings.HasSuffix(f.Name(), ".cert") { + certName := f.Name() + keyName := certName[:len(certName)-5] + ".key" + logrus.Debugf("cert: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, keyName) { + return fmt.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName) + } + cert, err := tls.LoadX509KeyPair(filepath.Join(directory, certName), filepath.Join(directory, keyName)) + if err != nil { + return err + } + tlsConfig.Certificates = append(tlsConfig.Certificates, cert) + } + if strings.HasSuffix(f.Name(), ".key") { + keyName := f.Name() + certName := keyName[:len(keyName)-4] + ".cert" + logrus.Debugf("key: %s", filepath.Join(directory, f.Name())) + if !hasFile(fs, certName) { + return fmt.Errorf("Missing client certificate %s for key %s", certName, keyName) + } + } + } + + return nil +} + +// Headers returns request modifiers with a User-Agent and metaHeaders +func Headers(userAgent string, metaHeaders http.Header) []transport.RequestModifier { + modifiers := []transport.RequestModifier{} + if userAgent != "" { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(http.Header{ + "User-Agent": []string{userAgent}, + })) + } + if metaHeaders != nil { + modifiers = append(modifiers, transport.NewHeaderRequestModifier(metaHeaders)) + } + return modifiers +} + +// HTTPClient returns an HTTP client structure which uses the given transport +// and contains the necessary headers for redirected requests +func HTTPClient(transport http.RoundTripper) *http.Client { + return &http.Client{ + Transport: transport, + CheckRedirect: addRequiredHeadersToRedirectedRequests, + } +} + +func trustedLocation(req *http.Request) bool { + var ( + trusteds = []string{"docker.com", "docker.io"} + hostname = strings.SplitN(req.Host, ":", 2)[0] + ) + if req.URL.Scheme != "https" { + return false + } + + for _, trusted := range trusteds { + if hostname == trusted || strings.HasSuffix(hostname, "."+trusted) { + return true + } + } + return false +} + +// addRequiredHeadersToRedirectedRequests adds the necessary redirection headers +// for redirected requests +func addRequiredHeadersToRedirectedRequests(req *http.Request, via []*http.Request) error { + if len(via) != 0 && via[0] != nil { + if trustedLocation(req) && trustedLocation(via[0]) { + req.Header = via[0].Header + return nil + } + for k, v := range via[0].Header { + if k != "Authorization" { + for _, vv := range v { + req.Header.Add(k, vv) + } + } + } + } + return nil +} + +// NewTransport returns a new HTTP transport. If tlsConfig is nil, it uses the +// default TLS configuration. 
+func NewTransport(tlsConfig *tls.Config) *http.Transport { + if tlsConfig == nil { + tlsConfig = tlsconfig.ServerDefault() + } + + direct := &net.Dialer{ + Timeout: 30 * time.Second, + KeepAlive: 30 * time.Second, + DualStack: true, + } + + base := &http.Transport{ + Proxy: http.ProxyFromEnvironment, + DialContext: direct.DialContext, + TLSHandshakeTimeout: 10 * time.Second, + TLSClientConfig: tlsConfig, + // TODO(dmcgowan): Call close idle connections when complete and use keep alive + DisableKeepAlives: true, + } + + return base +} diff --git a/vendor/github.com/docker/docker/registry/service.go b/vendor/github.com/docker/docker/registry/service.go new file mode 100644 index 0000000000..3b08e39da2 --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service.go @@ -0,0 +1,297 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "context" + "crypto/tls" + "net/http" + "net/url" + "strings" + "sync" + + "github.com/docker/distribution/reference" + "github.com/docker/distribution/registry/client/auth" + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + // DefaultSearchLimit is the default value for maximum number of returned search results. + DefaultSearchLimit = 25 +) + +// Service is the interface defining what a registry service should implement. +type Service interface { + Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) + LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) + LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) + ResolveRepository(name reference.Named) (*RepositoryInfo, error) + Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) + ServiceConfig() *registrytypes.ServiceConfig + TLSConfig(hostname string) (*tls.Config, error) + LoadAllowNondistributableArtifacts([]string) error + LoadMirrors([]string) error + LoadInsecureRegistries([]string) error +} + +// DefaultService is a registry service. It tracks configuration data such as a list +// of mirrors. +type DefaultService struct { + config *serviceConfig + mu sync.Mutex +} + +// NewService returns a new instance of DefaultService ready to be +// installed into an engine. +func NewService(options ServiceOptions) (*DefaultService, error) { + config, err := newServiceConfig(options) + + return &DefaultService{config: config}, err +} + +// ServiceConfig returns the public registry service configuration. 
+func (s *DefaultService) ServiceConfig() *registrytypes.ServiceConfig { + s.mu.Lock() + defer s.mu.Unlock() + + servConfig := registrytypes.ServiceConfig{ + AllowNondistributableArtifactsCIDRs: make([]*(registrytypes.NetIPNet), 0), + AllowNondistributableArtifactsHostnames: make([]string, 0), + InsecureRegistryCIDRs: make([]*(registrytypes.NetIPNet), 0), + IndexConfigs: make(map[string]*(registrytypes.IndexInfo)), + Mirrors: make([]string, 0), + } + + // construct a new ServiceConfig which will not retrieve s.Config directly, + // and look up items in s.config with mu locked + servConfig.AllowNondistributableArtifactsCIDRs = append(servConfig.AllowNondistributableArtifactsCIDRs, s.config.ServiceConfig.AllowNondistributableArtifactsCIDRs...) + servConfig.AllowNondistributableArtifactsHostnames = append(servConfig.AllowNondistributableArtifactsHostnames, s.config.ServiceConfig.AllowNondistributableArtifactsHostnames...) + servConfig.InsecureRegistryCIDRs = append(servConfig.InsecureRegistryCIDRs, s.config.ServiceConfig.InsecureRegistryCIDRs...) + + for key, value := range s.config.ServiceConfig.IndexConfigs { + servConfig.IndexConfigs[key] = value + } + + servConfig.Mirrors = append(servConfig.Mirrors, s.config.ServiceConfig.Mirrors...) + + return &servConfig +} + +// LoadAllowNondistributableArtifacts loads allow-nondistributable-artifacts registries for Service. +func (s *DefaultService) LoadAllowNondistributableArtifacts(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadAllowNondistributableArtifacts(registries) +} + +// LoadMirrors loads registry mirrors for Service +func (s *DefaultService) LoadMirrors(mirrors []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadMirrors(mirrors) +} + +// LoadInsecureRegistries loads insecure registries for Service +func (s *DefaultService) LoadInsecureRegistries(registries []string) error { + s.mu.Lock() + defer s.mu.Unlock() + + return s.config.LoadInsecureRegistries(registries) +} + +// Auth contacts the public registry with the provided credentials, +// and returns OK if authentication was successful. +// It can be used to verify the validity of a client's credentials. +func (s *DefaultService) Auth(ctx context.Context, authConfig *types.AuthConfig, userAgent string) (status, token string, err error) { + // TODO Use ctx when searching for repositories + var registryHostName = IndexHostname + + if authConfig.ServerAddress != "" { + serverAddress := authConfig.ServerAddress + if !strings.HasPrefix(serverAddress, "https://") && !strings.HasPrefix(serverAddress, "http://") { + serverAddress = "https://" + serverAddress + } + u, err := url.Parse(serverAddress) + if err != nil { + return "", "", errdefs.InvalidParameter(errors.Errorf("unable to parse server address: %v", err)) + } + registryHostName = u.Host + } + + // Lookup endpoints for authentication using "LookupPushEndpoints", which + // excludes mirrors to prevent sending credentials of the upstream registry + // to a mirror. 
+	endpoints, err := s.LookupPushEndpoints(registryHostName)
+	if err != nil {
+		return "", "", errdefs.InvalidParameter(err)
+	}
+
+	for _, endpoint := range endpoints {
+		status, token, err = loginV2(authConfig, endpoint, userAgent)
+		if err == nil {
+			return
+		}
+		if fErr, ok := err.(fallbackError); ok {
+			logrus.WithError(fErr.err).Infof("Error logging in to endpoint, trying next endpoint")
+			continue
+		}
+
+		return "", "", err
+	}
+
+	return "", "", err
+}
+
+// splitReposSearchTerm breaks a search term into an index name and remote name
+func splitReposSearchTerm(reposName string) (string, string) {
+	nameParts := strings.SplitN(reposName, "/", 2)
+	if len(nameParts) == 1 || (!strings.Contains(nameParts[0], ".") &&
+		!strings.Contains(nameParts[0], ":") && nameParts[0] != "localhost") {
+		// This is a Docker Hub repository (ex: samalba/hipache or ubuntu),
+		// use the default Docker Hub registry (docker.io)
+		return IndexName, reposName
+	}
+	return nameParts[0], nameParts[1]
+}
+
+// Search queries the public registry for images matching the specified
+// search terms, and returns the results.
+func (s *DefaultService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) {
+	// TODO Use ctx when searching for repositories
+	if err := validateNoScheme(term); err != nil {
+		return nil, err
+	}
+
+	indexName, remoteName := splitReposSearchTerm(term)
+
+	// Search is a long-running operation; only lock s.config to avoid blocking others.
+	s.mu.Lock()
+	index, err := newIndexInfo(s.config, indexName)
+	s.mu.Unlock()
+
+	if err != nil {
+		return nil, err
+	}
+
+	// TODO: Search multiple indexes.
+	endpoint, err := NewV1Endpoint(index, userAgent, headers)
+	if err != nil {
+		return nil, err
+	}
+
+	var client *http.Client
+	if authConfig != nil && authConfig.IdentityToken != "" && authConfig.Username != "" {
+		creds := NewStaticCredentialStore(authConfig)
+		scopes := []auth.Scope{
+			auth.RegistryScope{
+				Name:    "catalog",
+				Actions: []string{"search"},
+			},
+		}
+
+		modifiers := Headers(userAgent, nil)
+		v2Client, foundV2, err := v2AuthHTTPClient(endpoint.URL, endpoint.client.Transport, modifiers, creds, scopes)
+		if err != nil {
+			if fErr, ok := err.(fallbackError); ok {
+				logrus.Errorf("Cannot use identity token for search, v2 auth not supported: %v", fErr.err)
+			} else {
+				return nil, err
+			}
+		} else if foundV2 {
+			// Copy non-transport http client features
+			v2Client.Timeout = endpoint.client.Timeout
+			v2Client.CheckRedirect = endpoint.client.CheckRedirect
+			v2Client.Jar = endpoint.client.Jar
+
+			logrus.Debugf("using v2 client for search to %s", endpoint.URL)
+			client = v2Client
+		}
+	}
+
+	if client == nil {
+		client = endpoint.client
+		if err := authorizeClient(client, authConfig, endpoint); err != nil {
+			return nil, err
+		}
+	}
+
+	r := newSession(client, authConfig, endpoint)
+
+	if index.Official {
+		// If pulling "library/foo", it's stored locally under "foo"
+		remoteName = strings.TrimPrefix(remoteName, "library/")
+	}
+	return r.SearchRepositories(remoteName, limit)
+}
+
+// ResolveRepository splits a repository name into its components
+// and configuration of the associated registry.
+func (s *DefaultService) ResolveRepository(name reference.Named) (*RepositoryInfo, error) { + s.mu.Lock() + defer s.mu.Unlock() + return newRepositoryInfo(s.config, name) +} + +// APIEndpoint represents a remote API endpoint +type APIEndpoint struct { + Mirror bool + URL *url.URL + Version APIVersion + AllowNondistributableArtifacts bool + Official bool + TrimHostname bool + TLSConfig *tls.Config +} + +// ToV1Endpoint returns a V1 API endpoint based on the APIEndpoint +// Deprecated: this function is deprecated and will be removed in a future update +func (e APIEndpoint) ToV1Endpoint(userAgent string, metaHeaders http.Header) *V1Endpoint { + return newV1Endpoint(*e.URL, e.TLSConfig, userAgent, metaHeaders) +} + +// TLSConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) TLSConfig(hostname string) (*tls.Config, error) { + s.mu.Lock() + defer s.mu.Unlock() + + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +// tlsConfig constructs a client TLS configuration based on server defaults +func (s *DefaultService) tlsConfig(hostname string) (*tls.Config, error) { + return newTLSConfig(hostname, isSecureIndex(s.config, hostname)) +} + +func (s *DefaultService) tlsConfigForMirror(mirrorURL *url.URL) (*tls.Config, error) { + return s.tlsConfig(mirrorURL.Host) +} + +// LookupPullEndpoints creates a list of v2 endpoints to try to pull from, in order of preference. +// It gives preference to mirrors over the actual registry, and HTTPS over plain HTTP. +func (s *DefaultService) LookupPullEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + return s.lookupV2Endpoints(hostname) +} + +// LookupPushEndpoints creates a list of v2 endpoints to try to push to, in order of preference. +// It gives preference to HTTPS over plain HTTP. Mirrors are not included. 
+func (s *DefaultService) LookupPushEndpoints(hostname string) (endpoints []APIEndpoint, err error) { + s.mu.Lock() + defer s.mu.Unlock() + + allEndpoints, err := s.lookupV2Endpoints(hostname) + if err == nil { + for _, endpoint := range allEndpoints { + if !endpoint.Mirror { + endpoints = append(endpoints, endpoint) + } + } + } + return endpoints, err +} diff --git a/vendor/github.com/docker/docker/registry/service_v2.go b/vendor/github.com/docker/docker/registry/service_v2.go new file mode 100644 index 0000000000..3e3a5b41ff --- /dev/null +++ b/vendor/github.com/docker/docker/registry/service_v2.go @@ -0,0 +1,79 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "net/url" + "strings" + + "github.com/docker/go-connections/tlsconfig" +) + +func (s *DefaultService) lookupV2Endpoints(hostname string) (endpoints []APIEndpoint, err error) { + tlsConfig := tlsconfig.ServerDefault() + if hostname == DefaultNamespace || hostname == IndexHostname { + for _, mirror := range s.config.Mirrors { + if !strings.HasPrefix(mirror, "http://") && !strings.HasPrefix(mirror, "https://") { + mirror = "https://" + mirror + } + mirrorURL, err := url.Parse(mirror) + if err != nil { + return nil, err + } + mirrorTLSConfig, err := s.tlsConfigForMirror(mirrorURL) + if err != nil { + return nil, err + } + endpoints = append(endpoints, APIEndpoint{ + URL: mirrorURL, + Version: APIVersion2, + Mirror: true, + TrimHostname: true, + TLSConfig: mirrorTLSConfig, + }) + } + endpoints = append(endpoints, APIEndpoint{ + URL: DefaultV2Registry, + Version: APIVersion2, + Official: true, + TrimHostname: true, + TLSConfig: tlsConfig, + }) + + return endpoints, nil + } + + ana := allowNondistributableArtifacts(s.config, hostname) + + tlsConfig, err = s.tlsConfig(hostname) + if err != nil { + return nil, err + } + + endpoints = []APIEndpoint{ + { + URL: &url.URL{ + Scheme: "https", + Host: hostname, + }, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + TLSConfig: tlsConfig, + }, + } + + if tlsConfig.InsecureSkipVerify { + endpoints = append(endpoints, APIEndpoint{ + URL: &url.URL{ + Scheme: "http", + Host: hostname, + }, + Version: APIVersion2, + AllowNondistributableArtifacts: ana, + TrimHostname: true, + // used to check if supposed to be secure via InsecureSkipVerify + TLSConfig: tlsConfig, + }) + } + + return endpoints, nil +} diff --git a/vendor/github.com/docker/docker/registry/session.go b/vendor/github.com/docker/docker/registry/session.go new file mode 100644 index 0000000000..d34dc1e58a --- /dev/null +++ b/vendor/github.com/docker/docker/registry/session.go @@ -0,0 +1,227 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + // this is required for some certificates + _ "crypto/sha512" + "encoding/json" + "fmt" + "net/http" + "net/http/cookiejar" + "net/url" + "strings" + "sync" + + "github.com/docker/docker/api/types" + registrytypes "github.com/docker/docker/api/types/registry" + "github.com/docker/docker/errdefs" + "github.com/docker/docker/pkg/ioutils" + "github.com/docker/docker/pkg/jsonmessage" + "github.com/docker/docker/pkg/stringid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +// A Session is used to communicate with a V1 registry +type Session struct { + indexEndpoint *V1Endpoint + client *http.Client + // TODO(tiborvass): remove authConfig + authConfig 
*types.AuthConfig + id string +} + +type authTransport struct { + http.RoundTripper + *types.AuthConfig + + alwaysSetBasicAuth bool + token []string + + mu sync.Mutex // guards modReq + modReq map[*http.Request]*http.Request // original -> modified +} + +// AuthTransport handles the auth layer when communicating with a v1 registry (private or official) +// +// For private v1 registries, set alwaysSetBasicAuth to true. +// +// For the official v1 registry, if there isn't already an Authorization header in the request, +// but there is an X-Docker-Token header set to true, then Basic Auth will be used to set the Authorization header. +// After sending the request with the provided base http.RoundTripper, if an X-Docker-Token header, representing +// a token, is present in the response, then it gets cached and sent in the Authorization header of all subsequent +// requests. +// +// If the server sends a token without the client having requested it, it is ignored. +// +// This RoundTripper also has a CancelRequest method important for correct timeout handling. +func AuthTransport(base http.RoundTripper, authConfig *types.AuthConfig, alwaysSetBasicAuth bool) http.RoundTripper { + if base == nil { + base = http.DefaultTransport + } + return &authTransport{ + RoundTripper: base, + AuthConfig: authConfig, + alwaysSetBasicAuth: alwaysSetBasicAuth, + modReq: make(map[*http.Request]*http.Request), + } +} + +// cloneRequest returns a clone of the provided *http.Request. +// The clone is a shallow copy of the struct and its Header map. +func cloneRequest(r *http.Request) *http.Request { + // shallow copy of the struct + r2 := new(http.Request) + *r2 = *r + // deep copy of the Header + r2.Header = make(http.Header, len(r.Header)) + for k, s := range r.Header { + r2.Header[k] = append([]string(nil), s...) + } + + return r2 +} + +// RoundTrip changes an HTTP request's headers to add the necessary +// authentication-related headers +func (tr *authTransport) RoundTrip(orig *http.Request) (*http.Response, error) { + // Authorization should not be set on 302 redirect for untrusted locations. + // This logic mirrors the behavior in addRequiredHeadersToRedirectedRequests. + // As the authorization logic is currently implemented in RoundTrip, + // a 302 redirect is detected by looking at the Referrer header as go http package adds said header. + // This is safe as Docker doesn't set Referrer in other scenarios. 
+ if orig.Header.Get("Referer") != "" && !trustedLocation(orig) { + return tr.RoundTripper.RoundTrip(orig) + } + + req := cloneRequest(orig) + tr.mu.Lock() + tr.modReq[orig] = req + tr.mu.Unlock() + + if tr.alwaysSetBasicAuth { + if tr.AuthConfig == nil { + return nil, errors.New("unexpected error: empty auth config") + } + req.SetBasicAuth(tr.Username, tr.Password) + return tr.RoundTripper.RoundTrip(req) + } + + // Don't override + if req.Header.Get("Authorization") == "" { + if req.Header.Get("X-Docker-Token") == "true" && tr.AuthConfig != nil && len(tr.Username) > 0 { + req.SetBasicAuth(tr.Username, tr.Password) + } else if len(tr.token) > 0 { + req.Header.Set("Authorization", "Token "+strings.Join(tr.token, ",")) + } + } + resp, err := tr.RoundTripper.RoundTrip(req) + if err != nil { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + return nil, err + } + if len(resp.Header["X-Docker-Token"]) > 0 { + tr.token = resp.Header["X-Docker-Token"] + } + resp.Body = &ioutils.OnEOFReader{ + Rc: resp.Body, + Fn: func() { + tr.mu.Lock() + delete(tr.modReq, orig) + tr.mu.Unlock() + }, + } + return resp, nil +} + +// CancelRequest cancels an in-flight request by closing its connection. +func (tr *authTransport) CancelRequest(req *http.Request) { + type canceler interface { + CancelRequest(*http.Request) + } + if cr, ok := tr.RoundTripper.(canceler); ok { + tr.mu.Lock() + modReq := tr.modReq[req] + delete(tr.modReq, req) + tr.mu.Unlock() + cr.CancelRequest(modReq) + } +} + +func authorizeClient(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) error { + var alwaysSetBasicAuth bool + + // If we're working with a standalone private registry over HTTPS, send Basic Auth headers + // alongside all our requests. + if endpoint.String() != IndexServer && endpoint.URL.Scheme == "https" { + info, err := endpoint.Ping() + if err != nil { + return err + } + if info.Standalone && authConfig != nil { + logrus.Debugf("Endpoint %s is eligible for private registry. Enabling decorator.", endpoint.String()) + alwaysSetBasicAuth = true + } + } + + // Annotate the transport unconditionally so that v2 can + // properly fallback on v1 when an image is not found. 
+ client.Transport = AuthTransport(client.Transport, authConfig, alwaysSetBasicAuth) + + jar, err := cookiejar.New(nil) + if err != nil { + return errors.New("cookiejar.New is not supposed to return an error") + } + client.Jar = jar + + return nil +} + +func newSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) *Session { + return &Session{ + authConfig: authConfig, + client: client, + indexEndpoint: endpoint, + id: stringid.GenerateRandomID(), + } +} + +// NewSession creates a new session +// TODO(tiborvass): remove authConfig param once registry client v2 is vendored +func NewSession(client *http.Client, authConfig *types.AuthConfig, endpoint *V1Endpoint) (*Session, error) { + if err := authorizeClient(client, authConfig, endpoint); err != nil { + return nil, err + } + + return newSession(client, authConfig, endpoint), nil +} + +// SearchRepositories performs a search against the remote repository +func (r *Session) SearchRepositories(term string, limit int) (*registrytypes.SearchResults, error) { + if limit < 1 || limit > 100 { + return nil, errdefs.InvalidParameter(errors.Errorf("Limit %d is outside the range of [1, 100]", limit)) + } + logrus.Debugf("Index server: %s", r.indexEndpoint) + u := r.indexEndpoint.String() + "search?q=" + url.QueryEscape(term) + "&n=" + url.QueryEscape(fmt.Sprintf("%d", limit)) + + req, err := http.NewRequest(http.MethodGet, u, nil) + if err != nil { + return nil, errors.Wrap(errdefs.InvalidParameter(err), "Error building request") + } + // Have the AuthTransport send authentication, when logged in. + req.Header.Set("X-Docker-Token", "true") + res, err := r.client.Do(req) + if err != nil { + return nil, errdefs.System(err) + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return nil, &jsonmessage.JSONError{ + Message: fmt.Sprintf("Unexpected status code %d", res.StatusCode), + Code: res.StatusCode, + } + } + result := new(registrytypes.SearchResults) + return result, errors.Wrap(json.NewDecoder(res.Body).Decode(result), "error decoding registry search results") +} diff --git a/vendor/github.com/docker/docker/registry/types.go b/vendor/github.com/docker/docker/registry/types.go new file mode 100644 index 0000000000..28ed2bfa5e --- /dev/null +++ b/vendor/github.com/docker/docker/registry/types.go @@ -0,0 +1,70 @@ +package registry // import "github.com/docker/docker/registry" + +import ( + "github.com/docker/distribution/reference" + registrytypes "github.com/docker/docker/api/types/registry" +) + +// RepositoryData tracks the image list, list of endpoints for a repository +type RepositoryData struct { + // ImgList is a list of images in the repository + ImgList map[string]*ImgData + // Endpoints is a list of endpoints returned in X-Docker-Endpoints + Endpoints []string +} + +// ImgData is used to transfer image checksums to and from the registry +type ImgData struct { + // ID is an opaque string that identifies the image + ID string `json:"id"` + Checksum string `json:"checksum,omitempty"` + ChecksumPayload string `json:"-"` + Tag string `json:",omitempty"` +} + +// PingResult contains the information returned when pinging a registry. It +// indicates the registry's version and whether the registry claims to be a +// standalone registry. 
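+// For illustration (added comment; values hypothetical): a ping response body
+// of {"standalone": true, "version": "0.9.1"} populates both fields via the
+// json tags below.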
+type PingResult struct { + // Version is the registry version supplied by the registry in an HTTP + // header + Version string `json:"version"` + // Standalone is set to true if the registry indicates it is a + // standalone registry in the X-Docker-Registry-Standalone + // header + Standalone bool `json:"standalone"` +} + +// APIVersion is an integral representation of an API version (presently +// either 1 or 2) +type APIVersion int + +func (av APIVersion) String() string { + return apiVersions[av] +} + +// API Version identifiers. +const ( + _ = iota + APIVersion1 APIVersion = iota + APIVersion2 +) + +var apiVersions = map[APIVersion]string{ + APIVersion1: "v1", + APIVersion2: "v2", +} + +// RepositoryInfo describes a repository +type RepositoryInfo struct { + Name reference.Named + // Index points to registry information + Index *registrytypes.IndexInfo + // Official indicates whether the repository is considered official. + // If the registry is official, and the normalized name does not + // contain a '/' (e.g. "foo"), then it is considered an official repo. + Official bool + // Class represents the class of the repository, such as "plugin" + // or "image". + Class string +} diff --git a/vendor/github.com/docker/docker/rootless/rootless.go b/vendor/github.com/docker/docker/rootless/rootless.go new file mode 100644 index 0000000000..376d5263de --- /dev/null +++ b/vendor/github.com/docker/docker/rootless/rootless.go @@ -0,0 +1,25 @@ +package rootless // import "github.com/docker/docker/rootless" + +import ( + "os" + "sync" +) + +const ( + // RootlessKitDockerProxyBinary is the binary name of rootlesskit-docker-proxy + RootlessKitDockerProxyBinary = "rootlesskit-docker-proxy" +) + +var ( + runningWithRootlessKit bool + runningWithRootlessKitOnce sync.Once +) + +// RunningWithRootlessKit returns true if running under RootlessKit namespaces. +func RunningWithRootlessKit() bool { + runningWithRootlessKitOnce.Do(func() { + u := os.Getenv("ROOTLESSKIT_STATE_DIR") + runningWithRootlessKit = u != "" + }) + return runningWithRootlessKit +} diff --git a/vendor/github.com/docker/go-connections/LICENSE b/vendor/github.com/docker/go-connections/LICENSE new file mode 100644 index 0000000000..b55b37bc31 --- /dev/null +++ b/vendor/github.com/docker/go-connections/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-connections/nat/nat.go b/vendor/github.com/docker/go-connections/nat/nat.go new file mode 100644 index 0000000000..bb7e4e3369 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/nat.go @@ -0,0 +1,242 @@ +// Package nat is a convenience package for manipulation of strings describing network ports. 
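+// For example (added comment; values hypothetical), the port specification
+// "127.0.0.1:8080:80/tcp" maps host 127.0.0.1 port 8080 to container port 80
+// over TCP, following the ip:hostPort:containerPort template defined below.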
+package nat + +import ( + "fmt" + "net" + "strconv" + "strings" +) + +const ( + // portSpecTemplate is the expected format for port specifications + portSpecTemplate = "ip:hostPort:containerPort" +) + +// PortBinding represents a binding between a Host IP address and a Host Port +type PortBinding struct { + // HostIP is the host IP Address + HostIP string `json:"HostIp"` + // HostPort is the host port number + HostPort string +} + +// PortMap is a collection of PortBinding indexed by Port +type PortMap map[Port][]PortBinding + +// PortSet is a collection of structs indexed by Port +type PortSet map[Port]struct{} + +// Port is a string containing port number and protocol in the format "80/tcp" +type Port string + +// NewPort creates a new instance of a Port given a protocol and port number or port range +func NewPort(proto, port string) (Port, error) { + // Check for parsing issues on "port" now so we can avoid having + // to check it later on. + + portStartInt, portEndInt, err := ParsePortRangeToInt(port) + if err != nil { + return "", err + } + + if portStartInt == portEndInt { + return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil + } + return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil +} + +// ParsePort parses the port number string and returns an int +func ParsePort(rawPort string) (int, error) { + if len(rawPort) == 0 { + return 0, nil + } + port, err := strconv.ParseUint(rawPort, 10, 16) + if err != nil { + return 0, err + } + return int(port), nil +} + +// ParsePortRangeToInt parses the port range string and returns start/end ints +func ParsePortRangeToInt(rawPort string) (int, int, error) { + if len(rawPort) == 0 { + return 0, 0, nil + } + start, end, err := ParsePortRange(rawPort) + if err != nil { + return 0, 0, err + } + return int(start), int(end), nil +} + +// Proto returns the protocol of a Port +func (p Port) Proto() string { + proto, _ := SplitProtoPort(string(p)) + return proto +} + +// Port returns the port number of a Port +func (p Port) Port() string { + _, port := SplitProtoPort(string(p)) + return port +} + +// Int returns the port number of a Port as an int +func (p Port) Int() int { + portStr := p.Port() + // We don't need to check for an error because we're going to + // assume that any error would have been found, and reported, in NewPort() + port, _ := ParsePort(portStr) + return port +} + +// Range returns the start/end port numbers of a Port range as ints +func (p Port) Range() (int, int, error) { + return ParsePortRangeToInt(p.Port()) +} + +// SplitProtoPort splits a port in the format of proto/port +func SplitProtoPort(rawPort string) (string, string) { + parts := strings.Split(rawPort, "/") + l := len(parts) + if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { + return "", "" + } + if l == 1 { + return "tcp", rawPort + } + if len(parts[1]) == 0 { + return "tcp", parts[0] + } + return parts[1], parts[0] +} + +func validateProto(proto string) bool { + for _, availableProto := range []string{"tcp", "udp", "sctp"} { + if availableProto == proto { + return true + } + } + return false +} + +// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses +// these in to the internal types +func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { + var ( + exposedPorts = make(map[Port]struct{}, len(ports)) + bindings = make(map[Port][]PortBinding) + ) + for _, rawPort := range ports { + portMappings, err := ParsePortSpec(rawPort) + if err != nil { + return nil, nil, err + 
} + + for _, portMapping := range portMappings { + port := portMapping.Port + if _, exists := exposedPorts[port]; !exists { + exposedPorts[port] = struct{}{} + } + bslice, exists := bindings[port] + if !exists { + bslice = []PortBinding{} + } + bindings[port] = append(bslice, portMapping.Binding) + } + } + return exposedPorts, bindings, nil +} + +// PortMapping is a data object mapping a Port to a PortBinding +type PortMapping struct { + Port Port + Binding PortBinding +} + +func splitParts(rawport string) (string, string, string) { + parts := strings.Split(rawport, ":") + n := len(parts) + containerport := parts[n-1] + + switch n { + case 1: + return "", "", containerport + case 2: + return "", parts[0], containerport + case 3: + return parts[0], parts[1], containerport + default: + return strings.Join(parts[:n-2], ":"), parts[n-2], containerport + } +} + +// ParsePortSpec parses a port specification string into a slice of PortMappings +func ParsePortSpec(rawPort string) ([]PortMapping, error) { + var proto string + rawIP, hostPort, containerPort := splitParts(rawPort) + proto, containerPort = SplitProtoPort(containerPort) + + // Strip [] from IPV6 addresses + ip, _, err := net.SplitHostPort(rawIP + ":") + if err != nil { + return nil, fmt.Errorf("Invalid ip address %v: %s", rawIP, err) + } + if ip != "" && net.ParseIP(ip) == nil { + return nil, fmt.Errorf("Invalid ip address: %s", ip) + } + if containerPort == "" { + return nil, fmt.Errorf("No port specified: %s", rawPort) + } + + startPort, endPort, err := ParsePortRange(containerPort) + if err != nil { + return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) + } + + var startHostPort, endHostPort uint64 = 0, 0 + if len(hostPort) > 0 { + startHostPort, endHostPort, err = ParsePortRange(hostPort) + if err != nil { + return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) + } + } + + if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { + // Allow host port range iff containerPort is not a range. + // In this case, use the host port range as the dynamic + // host port range to allocate into. + if endPort != startPort { + return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) + } + } + + if !validateProto(strings.ToLower(proto)) { + return nil, fmt.Errorf("Invalid proto: %s", proto) + } + + ports := []PortMapping{} + for i := uint64(0); i <= (endPort - startPort); i++ { + containerPort = strconv.FormatUint(startPort+i, 10) + if len(hostPort) > 0 { + hostPort = strconv.FormatUint(startHostPort+i, 10) + } + // Set hostPort to a range only if there is a single container port + // and a dynamic host port. + if startPort == endPort && startHostPort != endHostPort { + hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) + } + port, err := NewPort(strings.ToLower(proto), containerPort) + if err != nil { + return nil, err + } + + binding := PortBinding{ + HostIP: ip, + HostPort: hostPort, + } + ports = append(ports, PortMapping{Port: port, Binding: binding}) + } + return ports, nil +} diff --git a/vendor/github.com/docker/go-connections/nat/parse.go b/vendor/github.com/docker/go-connections/nat/parse.go new file mode 100644 index 0000000000..892adf8c66 --- /dev/null +++ b/vendor/github.com/docker/go-connections/nat/parse.go @@ -0,0 +1,57 @@ +package nat + +import ( + "fmt" + "strconv" + "strings" +) + +// PartParser parses and validates the specified string (data) using the specified template +// e.g. 
ip:public:private -> 192.168.0.1:80:8000
+// Deprecated: do not use, this function may be removed in a future version
+func PartParser(template, data string) (map[string]string, error) {
+	// ip:public:private
+	var (
+		templateParts = strings.Split(template, ":")
+		parts         = strings.Split(data, ":")
+		out           = make(map[string]string, len(templateParts))
+	)
+	if len(parts) != len(templateParts) {
+		return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
+	}
+
+	for i, t := range templateParts {
+		value := ""
+		if len(parts) > i {
+			value = parts[i]
+		}
+		out[t] = value
+	}
+	return out, nil
+}
+
+// ParsePortRange parses and validates the specified string as a port-range (8000-9000)
+func ParsePortRange(ports string) (uint64, uint64, error) {
+	if ports == "" {
+		return 0, 0, fmt.Errorf("Empty string specified for ports.")
+	}
+	if !strings.Contains(ports, "-") {
+		start, err := strconv.ParseUint(ports, 10, 16)
+		end := start
+		return start, end, err
+	}
+
+	parts := strings.Split(ports, "-")
+	start, err := strconv.ParseUint(parts[0], 10, 16)
+	if err != nil {
+		return 0, 0, err
+	}
+	end, err := strconv.ParseUint(parts[1], 10, 16)
+	if err != nil {
+		return 0, 0, err
+	}
+	if end < start {
+		return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
+	}
+	return start, end, nil
+}
diff --git a/vendor/github.com/docker/go-connections/nat/sort.go b/vendor/github.com/docker/go-connections/nat/sort.go
new file mode 100644
index 0000000000..ce950171e3
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/nat/sort.go
@@ -0,0 +1,96 @@
+package nat
+
+import (
+	"sort"
+	"strings"
+)
+
+type portSorter struct {
+	ports []Port
+	by    func(i, j Port) bool
+}
+
+func (s *portSorter) Len() int {
+	return len(s.ports)
+}
+
+func (s *portSorter) Swap(i, j int) {
+	s.ports[i], s.ports[j] = s.ports[j], s.ports[i]
+}
+
+func (s *portSorter) Less(i, j int) bool {
+	ip := s.ports[i]
+	jp := s.ports[j]
+
+	return s.by(ip, jp)
+}
+
+// Sort sorts a list of ports using the provided predicate.
+// This function should compare `i` and `j`, returning true if `i` is
+// considered to be less than `j`
+func Sort(ports []Port, predicate func(i, j Port) bool) {
+	s := &portSorter{ports, predicate}
+	sort.Sort(s)
+}
+
+type portMapEntry struct {
+	port    Port
+	binding PortBinding
+}
+
+type portMapSorter []portMapEntry
+
+func (s portMapSorter) Len() int      { return len(s) }
+func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// sort the port so that the order is:
+// 1. port with larger specified bindings
+// 2. larger port
+// 3. port with tcp protocol
+func (s portMapSorter) Less(i, j int) bool {
+	pi, pj := s[i].port, s[j].port
+	hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort)
+	return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp")
+}
+
+// SortPortMap sorts the list of ports and their respective mappings. Ports
+// with an explicit HostPort are placed first.
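+// For illustration (added comment): given 80/tcp with no binding and 443/tcp
+// bound to host port 8443, 443/tcp sorts first because its host port is set.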
+func SortPortMap(ports []Port, bindings PortMap) {
+	s := portMapSorter{}
+	for _, p := range ports {
+		if binding, ok := bindings[p]; ok {
+			for _, b := range binding {
+				s = append(s, portMapEntry{port: p, binding: b})
+			}
+			bindings[p] = []PortBinding{}
+		} else {
+			s = append(s, portMapEntry{port: p})
+		}
+	}
+
+	sort.Sort(s)
+	var (
+		i  int
+		pm = make(map[Port]struct{})
+	)
+	// reorder ports
+	for _, entry := range s {
+		if _, ok := pm[entry.port]; !ok {
+			ports[i] = entry.port
+			pm[entry.port] = struct{}{}
+			i++
+		}
+		// reorder bindings for this port
+		if _, ok := bindings[entry.port]; ok {
+			bindings[entry.port] = append(bindings[entry.port], entry.binding)
+		}
+	}
+}
+
+func toInt(s string) uint64 {
+	i, _, err := ParsePortRange(s)
+	if err != nil {
+		i = 0
+	}
+	return i
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
new file mode 100644
index 0000000000..1ca0965e06
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_go17.go
@@ -0,0 +1,18 @@
+// +build go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+	"runtime"
+)
+
+// SystemCertPool returns a copy of the system cert pool. On Windows it
+// returns an empty pool instead of an error if loading the system pool fails.
+func SystemCertPool() (*x509.CertPool, error) {
+	certpool, err := x509.SystemCertPool()
+	if err != nil && runtime.GOOS == "windows" {
+		return x509.NewCertPool(), nil
+	}
+	return certpool, err
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
new file mode 100644
index 0000000000..1ff81c333c
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/certpool_other.go
@@ -0,0 +1,13 @@
+// +build !go1.7
+
+package tlsconfig
+
+import (
+	"crypto/x509"
+)
+
+// SystemCertPool returns a new empty cert pool;
+// accessing the system cert pool requires go 1.7 or later.
+func SystemCertPool() (*x509.CertPool, error) {
+	return x509.NewCertPool(), nil
+}
diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config.go b/vendor/github.com/docker/go-connections/tlsconfig/config.go
new file mode 100644
index 0000000000..0ef3fdcb46
--- /dev/null
+++ b/vendor/github.com/docker/go-connections/tlsconfig/config.go
@@ -0,0 +1,254 @@
+// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers.
+//
+// As a reminder from https://golang.org/pkg/crypto/tls/#Config:
+// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified.
+// A Config may be reused; the tls package will also not modify it.
+package tlsconfig
+
+import (
+	"crypto/tls"
+	"crypto/x509"
+	"encoding/pem"
+	"fmt"
+	"io/ioutil"
+	"os"
+
+	"github.com/pkg/errors"
+)
+
+// Options represents the information needed to create client and server TLS configurations.
+type Options struct {
+	CAFile string
+
+	// If either CertFile or KeyFile is empty, Client() will not load them,
+	// preventing the client from authenticating to the server.
+	// However, Server() requires them and will error out if they are empty.
+	CertFile string
+	KeyFile  string
+
+	// client-only option
+	InsecureSkipVerify bool
+	// server-only option
+	ClientAuth tls.ClientAuthType
+	// If ExclusiveRootPools is set, then if a CA file is provided, the root pool used for TLS
+	// creds will include exclusively the roots in that CA file. If no CA file is provided,
+	// the system pool will be used.
+	ExclusiveRootPools bool
+	MinVersion         uint16
+	// If Passphrase is set, it will be used to decrypt a TLS private key
+	// if the key is encrypted
+	Passphrase string
+}
+
+// Extra (server-side) accepted CBC cipher suites - will phase out in the future
+var acceptedCBCCiphers = []uint16{
+	tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
+	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,
+}
+
+// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls
+// options struct but wants to use a commonly accepted set of TLS cipher suites, with
+// known weak algorithms removed.
+var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...)
+
+// allTLSVersions lists all the TLS versions and is used by the code that validates
+// a uint16 value as a TLS version.
+var allTLSVersions = map[uint16]struct{}{
+	tls.VersionSSL30: {},
+	tls.VersionTLS10: {},
+	tls.VersionTLS11: {},
+	tls.VersionTLS12: {},
+}
+
+// ServerDefault returns a secure-enough TLS configuration for the server TLS configuration.
+func ServerDefault(ops ...func(*tls.Config)) *tls.Config {
+	tlsconfig := &tls.Config{
+		// Avoid fallback by default to SSL protocols < TLS1.2
+		MinVersion:               tls.VersionTLS12,
+		PreferServerCipherSuites: true,
+		CipherSuites:             DefaultServerAcceptedCiphers,
+	}
+
+	for _, op := range ops {
+		op(tlsconfig)
+	}
+
+	return tlsconfig
+}
+
+// ClientDefault returns a secure-enough TLS configuration for the client TLS configuration.
+func ClientDefault(ops ...func(*tls.Config)) *tls.Config {
+	tlsconfig := &tls.Config{
+		// Prefer TLS1.2 as the client minimum
+		MinVersion:   tls.VersionTLS12,
+		CipherSuites: clientCipherSuites,
+	}
+
+	for _, op := range ops {
+		op(tlsconfig)
+	}
+
+	return tlsconfig
+}
+
+// certPool returns an X.509 certificate pool from `caFile`, the certificate file.
+func certPool(caFile string, exclusivePool bool) (*x509.CertPool, error) {
+	// If we should verify the server, we need to load a trusted ca
+	var (
+		certPool *x509.CertPool
+		err      error
+	)
+	if exclusivePool {
+		certPool = x509.NewCertPool()
+	} else {
+		certPool, err = SystemCertPool()
+		if err != nil {
+			return nil, fmt.Errorf("failed to read system certificates: %v", err)
+		}
+	}
+	pem, err := ioutil.ReadFile(caFile)
+	if err != nil {
+		return nil, fmt.Errorf("could not read CA certificate %q: %v", caFile, err)
+	}
+	if !certPool.AppendCertsFromPEM(pem) {
+		return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile)
+	}
+	return certPool, nil
+}
+
+// isValidMinVersion checks that the input value is a valid tls minimum version
+func isValidMinVersion(version uint16) bool {
+	_, ok := allTLSVersions[version]
+	return ok
+}
+
+// adjustMinVersion sets the MinVersion on `config`, the input configuration.
+// It assumes the current MinVersion on the `config` is the lowest allowed.
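+// For example (added comment): with a config whose MinVersion is
+// tls.VersionTLS12, requesting options.MinVersion = tls.VersionTLS11 returns
+// an error, since it is lower than the config's current minimum.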
+func adjustMinVersion(options Options, config *tls.Config) error {
+	if options.MinVersion > 0 {
+		if !isValidMinVersion(options.MinVersion) {
+			return fmt.Errorf("Invalid minimum TLS version: %x", options.MinVersion)
+		}
+		if options.MinVersion < config.MinVersion {
+			return fmt.Errorf("Requested minimum TLS version is too low. Should be at-least: %x", config.MinVersion)
+		}
+		config.MinVersion = options.MinVersion
+	}
+
+	return nil
+}
+
+// IsErrEncryptedKey returns true if the 'err' is an error of incorrect
+// password when trying to decrypt a TLS private key
+func IsErrEncryptedKey(err error) bool {
+	return errors.Cause(err) == x509.IncorrectPasswordError
+}
+
+// getPrivateKey returns the private key in 'keyBytes', in PEM-encoded format.
+// If the private key is encrypted, 'passphrase' is used to decrypt the
+// private key.
+func getPrivateKey(keyBytes []byte, passphrase string) ([]byte, error) {
+	// this section makes some small changes to code from notary/tuf/utils/x509.go
+	pemBlock, _ := pem.Decode(keyBytes)
+	if pemBlock == nil {
+		return nil, fmt.Errorf("no valid private key found")
+	}
+
+	var err error
+	if x509.IsEncryptedPEMBlock(pemBlock) {
+		keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(passphrase))
+		if err != nil {
+			return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
+		}
+		keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
+	}
+
+	return keyBytes, nil
+}
+
+// getCert returns a Certificate from the CertFile and KeyFile in 'options';
+// if the key is encrypted, the Passphrase in 'options' will be used to
+// decrypt it.
+func getCert(options Options) ([]tls.Certificate, error) {
+	if options.CertFile == "" && options.KeyFile == "" {
+		return nil, nil
+	}
+
+	errMessage := "Could not load X509 key pair"
+
+	cert, err := ioutil.ReadFile(options.CertFile)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	prKeyBytes, err := ioutil.ReadFile(options.KeyFile)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	prKeyBytes, err = getPrivateKey(prKeyBytes, options.Passphrase)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	tlsCert, err := tls.X509KeyPair(cert, prKeyBytes)
+	if err != nil {
+		return nil, errors.Wrap(err, errMessage)
+	}
+
+	return []tls.Certificate{tlsCert}, nil
+}
+
+// Client returns a TLS configuration meant to be used by a client.
+func Client(options Options) (*tls.Config, error) {
+	tlsConfig := ClientDefault()
+	tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify
+	if !options.InsecureSkipVerify && options.CAFile != "" {
+		CAs, err := certPool(options.CAFile, options.ExclusiveRootPools)
+		if err != nil {
+			return nil, err
+		}
+		tlsConfig.RootCAs = CAs
+	}
+
+	tlsCerts, err := getCert(options)
+	if err != nil {
+		return nil, err
+	}
+	tlsConfig.Certificates = tlsCerts
+
+	if err := adjustMinVersion(options, tlsConfig); err != nil {
+		return nil, err
+	}
+
+	return tlsConfig, nil
+}
+
+// Server returns a TLS configuration meant to be used by a server.
+func Server(options Options) (*tls.Config, error) {
+	tlsConfig := ServerDefault()
+	tlsConfig.ClientAuth = options.ClientAuth
+	tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err)
+		}
+		return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v.
Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) + } + tlsConfig.Certificates = []tls.Certificate{tlsCert} + if options.ClientAuth >= tls.VerifyClientCertIfGiven && options.CAFile != "" { + CAs, err := certPool(options.CAFile, options.ExclusiveRootPools) + if err != nil { + return nil, err + } + tlsConfig.ClientCAs = CAs + } + + if err := adjustMinVersion(options, tlsConfig); err != nil { + return nil, err + } + + return tlsConfig, nil +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go new file mode 100644 index 0000000000..6b4c6a7c0d --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go @@ -0,0 +1,17 @@ +// +build go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go new file mode 100644 index 0000000000..ee22df47cb --- /dev/null +++ b/vendor/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go @@ -0,0 +1,15 @@ +// +build !go1.5 + +// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. +// +package tlsconfig + +import ( + "crypto/tls" +) + +// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) +var clientCipherSuites = []uint16{ + tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, + tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, +} diff --git a/vendor/github.com/docker/go-metrics/CONTRIBUTING.md b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md new file mode 100644 index 0000000000..b8a512c366 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/CONTRIBUTING.md @@ -0,0 +1,55 @@ +# Contributing + +## Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-metrics/LICENSE b/vendor/github.com/docker/go-metrics/LICENSE new file mode 100644 index 0000000000..8f3fee627a --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-metrics/LICENSE.docs b/vendor/github.com/docker/go-metrics/LICENSE.docs new file mode 100644 index 0000000000..e26cd4fc8e --- /dev/null +++ b/vendor/github.com/docker/go-metrics/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating
+that material is shared under a Creative Commons public license or as
+otherwise permitted by the Creative Commons policies published at
+creativecommons.org/policies, Creative Commons does not authorize the
+use of the trademark "Creative Commons" or any other trademark or logo
+of Creative Commons without its prior written consent including,
+without limitation, in connection with any unauthorized modifications
+to any of its public licenses or any other arrangements,
+understandings, or agreements concerning use of licensed material. For
+the avoidance of doubt, this paragraph does not form part of the public
+licenses.
+
+Creative Commons may be contacted at creativecommons.org.
diff --git a/vendor/github.com/docker/go-metrics/NOTICE b/vendor/github.com/docker/go-metrics/NOTICE
new file mode 100644
index 0000000000..8915f02773
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/NOTICE
@@ -0,0 +1,16 @@
+Docker
+Copyright 2012-2015 Docker, Inc.
+
+This product includes software developed at Docker, Inc. (https://www.docker.com).
+
+The following is courtesy of our legal counsel:
+
+
+Use and transfer of Docker may be subject to certain restrictions by the
+United States and other governments.
+It is your responsibility to ensure that your use and/or transfer does not
+violate applicable laws.
+
+For more information, please see https://www.bis.doc.gov
+
+See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.
diff --git a/vendor/github.com/docker/go-metrics/README.md b/vendor/github.com/docker/go-metrics/README.md
new file mode 100644
index 0000000000..a9e947cb56
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/README.md
@@ -0,0 +1,91 @@
+# go-metrics [![GoDoc](https://godoc.org/github.com/docker/go-metrics?status.svg)](https://godoc.org/github.com/docker/go-metrics) ![Badge Badge](http://doyouevenbadge.com/github.com/docker/go-metrics)
+
+This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
+
+## Best Practices
+
+This package is meant to be used for collecting metrics in Docker projects.
+It is not meant to be used as a replacement for the prometheus client, but to help enforce consistent naming across metrics collected.
+If you have not already read the prometheus best practices around naming and labels, you can read the page [here](https://prometheus.io/docs/practices/naming/).
+
+The following are a few Docker-specific rules that will help you name and work with metrics in your project.
+
+1. Namespace and Subsystem
+
+This package provides you with a namespace type that allows you to specify the same namespace and subsystem for your metrics.
+
+```go
+ns := metrics.NewNamespace("engine", "daemon", metrics.Labels{
+    "version": dockerversion.Version,
+    "commit":  dockerversion.GitCommit,
+})
+```
+
+In the example above we are creating metrics for the Docker engine's daemon package.
+`engine` would be the namespace in this example and `daemon` is the subsystem or package where we are collecting the metrics.
+
+A namespace also allows you to attach constant labels to the metrics such as the git commit and version that it is collecting.
+
+2. Declaring your Metrics
+
+Try to keep all your metric declarations in one file.
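+A hypothetical `metrics.go` might look like this (the variable `version` and the metric names are illustrative, not part of this library):
+
+```go
+var (
+    ns     = metrics.NewNamespace("engine", "daemon", metrics.Labels{"version": version})
+    starts = ns.NewCounter("container_starts", "The number of container start actions")
+    heap   = ns.NewGauge("heap", "The heap size of the daemon", metrics.Bytes)
+)
+```
+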
+This makes it easy for others to see what constant labels are defined on the namespace and what labels are defined on the metrics when they are created.
+
+3. Use labels instead of multiple metrics
+
+Labels allow you to define one metric such as the time it takes to perform a certain action on an object.
+If we wanted to collect timings on various container actions such as create, start, and delete then we can define one metric called `container_actions` and use labels to specify the type of action.
+
+
+```go
+containerActions = ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
+```
+
+The last parameter is the label name or key.
+When adding a data point to the metric you will use the `WithValues` function to specify the `action` that you are collecting for.
+
+```go
+containerActions.WithValues("create").UpdateSince(start)
+```
+
+4. Always use a unit
+
+The metric name should describe what you are measuring but you also need to provide the unit that it is being measured with.
+For a timer, the standard unit is seconds and a counter's standard unit is a total.
+For gauges you must provide the unit.
+This package provides a standard set of units for use within the Docker projects.
+
+```go
+Nanoseconds Unit = "nanoseconds"
+Seconds Unit = "seconds"
+Bytes Unit = "bytes"
+Total Unit = "total"
+```
+
+If you need to use a unit but it is not defined in the package, please open a PR to add it, but first try to see if one of the already created units will work for your metric, i.e. seconds or nanoseconds vs adding milliseconds.
+
+## Docs
+
+Package documentation can be found [here](https://godoc.org/github.com/docker/go-metrics).
+
+## HTTP Metrics
+
+To instrument an HTTP handler, you can wrap the code like this:
+
+```go
+namespace := metrics.NewNamespace("docker_distribution", "http", nil)
+httpMetrics := namespace.NewDefaultHttpMetrics("your_http_handler_name")
+metrics.Register(namespace)
+instrumentedHandler = metrics.InstrumentHandler(httpMetrics, unInstrumentedHandler)
+```
+Note: the `handler` label is attached to each of these metrics for you, from the handler name passed to `NewDefaultHttpMetrics`.
+
+## Additional Metrics
+
+Additional metrics are also defined here that are not available in the prometheus client.
+If you need a custom metric and it is generic enough to be used by multiple projects, define it here.
+
+
+## Copyright and license
+
+Copyright © 2016 Docker, Inc. All rights reserved, except as follows. Code is released under the Apache 2.0 license. The README.md file, and files in the "docs" folder are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file "LICENSE.docs". You may obtain a duplicate copy of the same license, titled CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/.
diff --git a/vendor/github.com/docker/go-metrics/counter.go b/vendor/github.com/docker/go-metrics/counter.go
new file mode 100644
index 0000000000..fe36316a45
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/counter.go
@@ -0,0 +1,52 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Counter is a metric that can only increment its current count
+type Counter interface {
+    // Inc adds Sum(vs) to the counter. Sum(vs) must be positive.
+    //
+    // If len(vs) == 0, increments the counter by 1.
+    Inc(vs ...float64)
+}
+
+// LabeledCounter is a counter that must have labels populated before use.
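+//
+// A caller-side sketch (the namespace ns and all names are assumed for illustration):
+//
+//    starts := ns.NewLabeledCounter("container_starts", "The number of container start actions", "runtime")
+//    starts.WithValues("runc").Inc()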
+type LabeledCounter interface {
+    WithValues(vs ...string) Counter
+}
+
+type labeledCounter struct {
+    pc *prometheus.CounterVec
+}
+
+func (lc *labeledCounter) WithValues(vs ...string) Counter {
+    return &counter{pc: lc.pc.WithLabelValues(vs...)}
+}
+
+func (lc *labeledCounter) Describe(ch chan<- *prometheus.Desc) {
+    lc.pc.Describe(ch)
+}
+
+func (lc *labeledCounter) Collect(ch chan<- prometheus.Metric) {
+    lc.pc.Collect(ch)
+}
+
+type counter struct {
+    pc prometheus.Counter
+}
+
+func (c *counter) Inc(vs ...float64) {
+    if len(vs) == 0 {
+        c.pc.Inc()
+    }
+
+    c.pc.Add(sumFloat64(vs...))
+}
+
+func (c *counter) Describe(ch chan<- *prometheus.Desc) {
+    c.pc.Describe(ch)
+}
+
+func (c *counter) Collect(ch chan<- prometheus.Metric) {
+    c.pc.Collect(ch)
+}
diff --git a/vendor/github.com/docker/go-metrics/docs.go b/vendor/github.com/docker/go-metrics/docs.go
new file mode 100644
index 0000000000..8fbdfc697d
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/docs.go
@@ -0,0 +1,3 @@
+// This package is a small wrapper around the prometheus go client to help enforce convention and best practices for metrics collection in Docker projects.
+
+package metrics
diff --git a/vendor/github.com/docker/go-metrics/gauge.go b/vendor/github.com/docker/go-metrics/gauge.go
new file mode 100644
index 0000000000..74296e8774
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/gauge.go
@@ -0,0 +1,72 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Gauge is a metric that allows incrementing and decrementing a value
+type Gauge interface {
+    Inc(...float64)
+    Dec(...float64)
+
+    // Add adds the provided value to the gauge's current value
+    Add(float64)
+
+    // Set replaces the gauge's current value with the provided value
+    Set(float64)
+}
+
+// LabeledGauge describes a gauge that must have values populated before use.
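+//
+// A caller-side sketch (names and the unit choice are assumed for illustration):
+//
+//    queued := ns.NewLabeledGauge("queued_jobs", "The number of queued jobs", metrics.Total, "queue")
+//    queued.WithValues("builds").Inc()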
+type LabeledGauge interface {
+    WithValues(labels ...string) Gauge
+}
+
+type labeledGauge struct {
+    pg *prometheus.GaugeVec
+}
+
+func (lg *labeledGauge) WithValues(labels ...string) Gauge {
+    return &gauge{pg: lg.pg.WithLabelValues(labels...)}
+}
+
+func (lg *labeledGauge) Describe(c chan<- *prometheus.Desc) {
+    lg.pg.Describe(c)
+}
+
+func (lg *labeledGauge) Collect(c chan<- prometheus.Metric) {
+    lg.pg.Collect(c)
+}
+
+type gauge struct {
+    pg prometheus.Gauge
+}
+
+func (g *gauge) Inc(vs ...float64) {
+    if len(vs) == 0 {
+        g.pg.Inc()
+    }
+
+    g.Add(sumFloat64(vs...))
+}
+
+func (g *gauge) Dec(vs ...float64) {
+    if len(vs) == 0 {
+        g.pg.Dec()
+    }
+
+    g.Add(-sumFloat64(vs...))
+}
+
+func (g *gauge) Add(v float64) {
+    g.pg.Add(v)
+}
+
+func (g *gauge) Set(v float64) {
+    g.pg.Set(v)
+}
+
+func (g *gauge) Describe(c chan<- *prometheus.Desc) {
+    g.pg.Describe(c)
+}
+
+func (g *gauge) Collect(c chan<- prometheus.Metric) {
+    g.pg.Collect(c)
+}
diff --git a/vendor/github.com/docker/go-metrics/handler.go b/vendor/github.com/docker/go-metrics/handler.go
new file mode 100644
index 0000000000..05601e9ecd
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/handler.go
@@ -0,0 +1,74 @@
+package metrics
+
+import (
+    "net/http"
+
+    "github.com/prometheus/client_golang/prometheus"
+    "github.com/prometheus/client_golang/prometheus/promhttp"
+)
+
+// HTTPHandlerOpts describes a set of configurable options for HTTP metrics
+type HTTPHandlerOpts struct {
+    DurationBuckets []float64
+    RequestSizeBuckets []float64
+    ResponseSizeBuckets []float64
+}
+
+const (
+    InstrumentHandlerResponseSize = iota
+    InstrumentHandlerRequestSize
+    InstrumentHandlerDuration
+    InstrumentHandlerCounter
+    InstrumentHandlerInFlight
+)
+
+type HTTPMetric struct {
+    prometheus.Collector
+    handlerType int
+}
+
+var (
+    defaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 60}
+    defaultRequestSizeBuckets = prometheus.ExponentialBuckets(1024, 2, 22) // 1K to 4G
+    defaultResponseSizeBuckets = defaultRequestSizeBuckets
+)
+
+// Handler returns the global http.Handler that provides the prometheus
+// metrics format on GET requests. This handler is no longer instrumented.
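+//
+// Callers typically mount it on their own mux, e.g.:
+//
+//    http.Handle("/metrics", metrics.Handler())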
+func Handler() http.Handler {
+    return promhttp.Handler()
+}
+
+func InstrumentHandler(metrics []*HTTPMetric, handler http.Handler) http.HandlerFunc {
+    return InstrumentHandlerFunc(metrics, handler.ServeHTTP)
+}
+
+func InstrumentHandlerFunc(metrics []*HTTPMetric, handlerFunc http.HandlerFunc) http.HandlerFunc {
+    var handler http.Handler
+    handler = http.HandlerFunc(handlerFunc)
+    for _, metric := range metrics {
+        switch metric.handlerType {
+        case InstrumentHandlerResponseSize:
+            if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+                handler = promhttp.InstrumentHandlerResponseSize(collector, handler)
+            }
+        case InstrumentHandlerRequestSize:
+            if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+                handler = promhttp.InstrumentHandlerRequestSize(collector, handler)
+            }
+        case InstrumentHandlerDuration:
+            if collector, ok := metric.Collector.(prometheus.ObserverVec); ok {
+                handler = promhttp.InstrumentHandlerDuration(collector, handler)
+            }
+        case InstrumentHandlerCounter:
+            if collector, ok := metric.Collector.(*prometheus.CounterVec); ok {
+                handler = promhttp.InstrumentHandlerCounter(collector, handler)
+            }
+        case InstrumentHandlerInFlight:
+            if collector, ok := metric.Collector.(prometheus.Gauge); ok {
+                handler = promhttp.InstrumentHandlerInFlight(collector, handler)
+            }
+        }
+    }
+    return handler.ServeHTTP
+}
diff --git a/vendor/github.com/docker/go-metrics/helpers.go b/vendor/github.com/docker/go-metrics/helpers.go
new file mode 100644
index 0000000000..68b7f51b33
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/helpers.go
@@ -0,0 +1,10 @@
+package metrics
+
+func sumFloat64(vs ...float64) float64 {
+    var sum float64
+    for _, v := range vs {
+        sum += v
+    }
+
+    return sum
+}
diff --git a/vendor/github.com/docker/go-metrics/namespace.go b/vendor/github.com/docker/go-metrics/namespace.go
new file mode 100644
index 0000000000..798315451a
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/namespace.go
@@ -0,0 +1,315 @@
+package metrics
+
+import (
+    "fmt"
+    "sync"
+
+    "github.com/prometheus/client_golang/prometheus"
+)
+
+type Labels map[string]string
+
+// NewNamespace returns a namespace that is responsible for managing a collection of
+// metrics for a particular namespace and subsystem
+//
+// labels allows const labels to be added to all metrics created in this namespace
+// and are commonly used for data like application version and git commit
+func NewNamespace(name, subsystem string, labels Labels) *Namespace {
+    if labels == nil {
+        labels = make(map[string]string)
+    }
+    return &Namespace{
+        name: name,
+        subsystem: subsystem,
+        labels: labels,
+    }
+}
+
+// Namespace describes a set of metrics that share a namespace and subsystem.
+type Namespace struct {
+    name string
+    subsystem string
+    labels Labels
+    mu sync.Mutex
+    metrics []prometheus.Collector
+}
+
+// WithConstLabels returns a namespace with the provided set of labels merged
+// with the existing constant labels on the namespace.
+//
+// Only metrics created with the returned namespace will get the new constant
+// labels. The returned namespace must be registered separately.
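+//
+// A caller-side sketch (the namespace ns and the commit value are assumed):
+//
+//    build := ns.WithConstLabels(metrics.Labels{"git_commit": commit})
+//    metrics.Register(build)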
+func (n *Namespace) WithConstLabels(labels Labels) *Namespace { + n.mu.Lock() + ns := &Namespace{ + name: n.name, + subsystem: n.subsystem, + labels: mergeLabels(n.labels, labels), + } + n.mu.Unlock() + return ns +} + +func (n *Namespace) NewCounter(name, help string) Counter { + c := &counter{pc: prometheus.NewCounter(n.newCounterOpts(name, help))} + n.Add(c) + return c +} + +func (n *Namespace) NewLabeledCounter(name, help string, labels ...string) LabeledCounter { + c := &labeledCounter{pc: prometheus.NewCounterVec(n.newCounterOpts(name, help), labels)} + n.Add(c) + return c +} + +func (n *Namespace) newCounterOpts(name, help string) prometheus.CounterOpts { + return prometheus.CounterOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Total), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewTimer(name, help string) Timer { + t := &timer{ + m: prometheus.NewHistogram(n.newTimerOpts(name, help)), + } + n.Add(t) + return t +} + +func (n *Namespace) NewLabeledTimer(name, help string, labels ...string) LabeledTimer { + t := &labeledTimer{ + m: prometheus.NewHistogramVec(n.newTimerOpts(name, help), labels), + } + n.Add(t) + return t +} + +func (n *Namespace) newTimerOpts(name, help string) prometheus.HistogramOpts { + return prometheus.HistogramOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, Seconds), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) NewGauge(name, help string, unit Unit) Gauge { + g := &gauge{ + pg: prometheus.NewGauge(n.newGaugeOpts(name, help, unit)), + } + n.Add(g) + return g +} + +func (n *Namespace) NewLabeledGauge(name, help string, unit Unit, labels ...string) LabeledGauge { + g := &labeledGauge{ + pg: prometheus.NewGaugeVec(n.newGaugeOpts(name, help, unit), labels), + } + n.Add(g) + return g +} + +func (n *Namespace) newGaugeOpts(name, help string, unit Unit) prometheus.GaugeOpts { + return prometheus.GaugeOpts{ + Namespace: n.name, + Subsystem: n.subsystem, + Name: makeName(name, unit), + Help: help, + ConstLabels: prometheus.Labels(n.labels), + } +} + +func (n *Namespace) Describe(ch chan<- *prometheus.Desc) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Describe(ch) + } +} + +func (n *Namespace) Collect(ch chan<- prometheus.Metric) { + n.mu.Lock() + defer n.mu.Unlock() + + for _, metric := range n.metrics { + metric.Collect(ch) + } +} + +func (n *Namespace) Add(collector prometheus.Collector) { + n.mu.Lock() + n.metrics = append(n.metrics, collector) + n.mu.Unlock() +} + +func (n *Namespace) NewDesc(name, help string, unit Unit, labels ...string) *prometheus.Desc { + name = makeName(name, unit) + namespace := n.name + if n.subsystem != "" { + namespace = fmt.Sprintf("%s_%s", namespace, n.subsystem) + } + name = fmt.Sprintf("%s_%s", namespace, name) + return prometheus.NewDesc(name, help, labels, prometheus.Labels(n.labels)) +} + +// mergeLabels merges two or more labels objects into a single map, favoring +// the later labels. 
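+// For example (illustrative values):
+//
+//    mergeLabels(Labels{"app": "engine", "tier": "node"}, Labels{"tier": "web"})
+//    // => Labels{"app": "engine", "tier": "web"}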
+func mergeLabels(lbs ...Labels) Labels {
+    merged := make(Labels)
+
+    for _, target := range lbs {
+        for k, v := range target {
+            merged[k] = v
+        }
+    }
+
+    return merged
+}
+
+func makeName(name string, unit Unit) string {
+    if unit == "" {
+        return name
+    }
+
+    return fmt.Sprintf("%s_%s", name, unit)
+}
+
+func (n *Namespace) NewDefaultHttpMetrics(handlerName string) []*HTTPMetric {
+    return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
+        DurationBuckets: defaultDurationBuckets,
+        RequestSizeBuckets: defaultRequestSizeBuckets,
+        ResponseSizeBuckets: defaultResponseSizeBuckets,
+    })
+}
+
+func (n *Namespace) NewHttpMetrics(handlerName string, durationBuckets, requestSizeBuckets, responseSizeBuckets []float64) []*HTTPMetric {
+    return n.NewHttpMetricsWithOpts(handlerName, HTTPHandlerOpts{
+        DurationBuckets: durationBuckets,
+        RequestSizeBuckets: requestSizeBuckets,
+        ResponseSizeBuckets: responseSizeBuckets,
+    })
+}
+
+func (n *Namespace) NewHttpMetricsWithOpts(handlerName string, opts HTTPHandlerOpts) []*HTTPMetric {
+    var httpMetrics []*HTTPMetric
+    inFlightMetric := n.NewInFlightGaugeMetric(handlerName)
+    requestTotalMetric := n.NewRequestTotalMetric(handlerName)
+    requestDurationMetric := n.NewRequestDurationMetric(handlerName, opts.DurationBuckets)
+    requestSizeMetric := n.NewRequestSizeMetric(handlerName, opts.RequestSizeBuckets)
+    responseSizeMetric := n.NewResponseSizeMetric(handlerName, opts.ResponseSizeBuckets)
+    httpMetrics = append(httpMetrics, inFlightMetric, requestDurationMetric, requestTotalMetric, requestSizeMetric, responseSizeMetric)
+    return httpMetrics
+}
+
+func (n *Namespace) NewInFlightGaugeMetric(handlerName string) *HTTPMetric {
+    labels := prometheus.Labels(n.labels)
+    labels["handler"] = handlerName
+    metric := prometheus.NewGauge(prometheus.GaugeOpts{
+        Namespace: n.name,
+        Subsystem: n.subsystem,
+        Name: "in_flight_requests",
+        Help: "The in-flight HTTP requests",
+        ConstLabels: prometheus.Labels(labels),
+    })
+    httpMetric := &HTTPMetric{
+        Collector: metric,
+        handlerType: InstrumentHandlerInFlight,
+    }
+    n.Add(httpMetric)
+    return httpMetric
+}
+
+func (n *Namespace) NewRequestTotalMetric(handlerName string) *HTTPMetric {
+    labels := prometheus.Labels(n.labels)
+    labels["handler"] = handlerName
+    metric := prometheus.NewCounterVec(
+        prometheus.CounterOpts{
+            Namespace: n.name,
+            Subsystem: n.subsystem,
+            Name: "requests_total",
+            Help: "Total number of HTTP requests made.",
+            ConstLabels: prometheus.Labels(labels),
+        },
+        []string{"code", "method"},
+    )
+    httpMetric := &HTTPMetric{
+        Collector: metric,
+        handlerType: InstrumentHandlerCounter,
+    }
+    n.Add(httpMetric)
+    return httpMetric
+}
+
+func (n *Namespace) NewRequestDurationMetric(handlerName string, buckets []float64) *HTTPMetric {
+    if len(buckets) == 0 {
+        panic("DurationBuckets must be provided")
+    }
+    labels := prometheus.Labels(n.labels)
+    labels["handler"] = handlerName
+    opts := prometheus.HistogramOpts{
+        Namespace: n.name,
+        Subsystem: n.subsystem,
+        Name: "request_duration_seconds",
+        Help: "The HTTP request latencies in seconds.",
+        Buckets: buckets,
+        ConstLabels: prometheus.Labels(labels),
+    }
+    metric := prometheus.NewHistogramVec(opts, []string{"method"})
+    httpMetric := &HTTPMetric{
+        Collector: metric,
+        handlerType: InstrumentHandlerDuration,
+    }
+    n.Add(httpMetric)
+    return httpMetric
+}
+
+func (n *Namespace) NewRequestSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+    if len(buckets) == 0 {
+        panic("RequestSizeBuckets must be provided")
+    }
+    labels := prometheus.Labels(n.labels)
+    labels["handler"] = handlerName
+    opts := prometheus.HistogramOpts{
+        Namespace: n.name,
+        Subsystem: n.subsystem,
+        Name: "request_size_bytes",
+        Help: "The HTTP request sizes in bytes.",
+        Buckets: buckets,
+        ConstLabels: prometheus.Labels(labels),
+    }
+    metric := prometheus.NewHistogramVec(opts, []string{})
+    httpMetric := &HTTPMetric{
+        Collector: metric,
+        handlerType: InstrumentHandlerRequestSize,
+    }
+    n.Add(httpMetric)
+    return httpMetric
+}
+
+func (n *Namespace) NewResponseSizeMetric(handlerName string, buckets []float64) *HTTPMetric {
+    if len(buckets) == 0 {
+        panic("ResponseSizeBuckets must be provided")
+    }
+    labels := prometheus.Labels(n.labels)
+    labels["handler"] = handlerName
+    opts := prometheus.HistogramOpts{
+        Namespace: n.name,
+        Subsystem: n.subsystem,
+        Name: "response_size_bytes",
+        Help: "The HTTP response sizes in bytes.",
+        Buckets: buckets,
+        ConstLabels: prometheus.Labels(labels),
+    }
+    metrics := prometheus.NewHistogramVec(opts, []string{})
+    httpMetric := &HTTPMetric{
+        Collector: metrics,
+        handlerType: InstrumentHandlerResponseSize,
+    }
+    n.Add(httpMetric)
+    return httpMetric
+}
diff --git a/vendor/github.com/docker/go-metrics/register.go b/vendor/github.com/docker/go-metrics/register.go
new file mode 100644
index 0000000000..708358df01
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/register.go
@@ -0,0 +1,15 @@
+package metrics
+
+import "github.com/prometheus/client_golang/prometheus"
+
+// Register adds all the metrics in the provided namespace to the global
+// metrics registry
+func Register(n *Namespace) {
+    prometheus.MustRegister(n)
+}
+
+// Deregister removes all the metrics in the provided namespace from the
+// global metrics registry
+func Deregister(n *Namespace) {
+    prometheus.Unregister(n)
+}
diff --git a/vendor/github.com/docker/go-metrics/timer.go b/vendor/github.com/docker/go-metrics/timer.go
new file mode 100644
index 0000000000..824c98739c
--- /dev/null
+++ b/vendor/github.com/docker/go-metrics/timer.go
@@ -0,0 +1,85 @@
+package metrics
+
+import (
+    "time"
+
+    "github.com/prometheus/client_golang/prometheus"
+)
+
+// StartTimer begins a timer observation at the callsite. When the target
+// operation is completed, the caller should call the returned done func().
+func StartTimer(timer Timer) (done func()) {
+    start := time.Now()
+    return func() {
+        timer.Update(time.Since(start))
+    }
+}
+
+// Timer is a metric that allows collecting the duration of an action in seconds
+type Timer interface {
+    // Update records an observation, duration, and converts to the target
+    // units.
+    Update(duration time.Duration)
+
+    // UpdateSince will add the duration from the provided starting time to the
+    // timer's summary with the precision that was used in creation of the timer
+    UpdateSince(time.Time)
+}
+
+// LabeledTimer is a timer that must have label values populated before use.
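+//
+// A caller-side sketch (the namespace ns and the timed operation are assumed):
+//
+//    actions := ns.NewLabeledTimer("container_actions", "The number of seconds it takes to process each container action", "action")
+//    done := metrics.StartTimer(actions.WithValues("create"))
+//    createContainer() // assumed operation
+//    done()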
+type LabeledTimer interface { + WithValues(labels ...string) *labeledTimerObserver +} + +type labeledTimer struct { + m *prometheus.HistogramVec +} + +type labeledTimerObserver struct { + m prometheus.Observer +} + +func (lbo *labeledTimerObserver) Update(duration time.Duration) { + lbo.m.Observe(duration.Seconds()) +} + +func (lbo *labeledTimerObserver) UpdateSince(since time.Time) { + lbo.m.Observe(time.Since(since).Seconds()) +} + +func (lt *labeledTimer) WithValues(labels ...string) *labeledTimerObserver { + return &labeledTimerObserver{m: lt.m.WithLabelValues(labels...)} +} + +func (lt *labeledTimer) Describe(c chan<- *prometheus.Desc) { + lt.m.Describe(c) +} + +func (lt *labeledTimer) Collect(c chan<- prometheus.Metric) { + lt.m.Collect(c) +} + +type timer struct { + m prometheus.Observer +} + +func (t *timer) Update(duration time.Duration) { + t.m.Observe(duration.Seconds()) +} + +func (t *timer) UpdateSince(since time.Time) { + t.m.Observe(time.Since(since).Seconds()) +} + +func (t *timer) Describe(c chan<- *prometheus.Desc) { + c <- t.m.(prometheus.Metric).Desc() +} + +func (t *timer) Collect(c chan<- prometheus.Metric) { + // Are there any observers that don't implement Collector? It is really + // unclear what the point of the upstream change was, but we'll let this + // panic if we get an observer that doesn't implement collector. In this + // case, we should almost always see metricVec objects, so this should + // never panic. + t.m.(prometheus.Collector).Collect(c) +} diff --git a/vendor/github.com/docker/go-metrics/unit.go b/vendor/github.com/docker/go-metrics/unit.go new file mode 100644 index 0000000000..c96622f903 --- /dev/null +++ b/vendor/github.com/docker/go-metrics/unit.go @@ -0,0 +1,12 @@ +package metrics + +// Unit represents the type or precision of a metric that is appended to +// the metrics fully qualified name +type Unit string + +const ( + Nanoseconds Unit = "nanoseconds" + Seconds Unit = "seconds" + Bytes Unit = "bytes" + Total Unit = "total" +) diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 0000000000..9ea86d784e --- /dev/null +++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. 
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
+```
+
+Then you just add a line to every git commit message:
+
+    Signed-off-by: Joe Smith <joe.smith@email.com>
+
+Use your real name (sorry, no pseudonyms or anonymous contributions.)
+
+If you set your `user.name` and `user.email` git configs, you can sign your
+commit automatically with `git commit -s`.
diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE
new file mode 100644
index 0000000000..b55b37bc31
--- /dev/null
+++ b/vendor/github.com/docker/go-units/LICENSE
@@ -0,0 +1,191 @@
+
+                                 Apache License
+                           Version 2.0, January 2004
+                        https://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 0000000000..4aac7c7411 --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,46 @@ +# go-units maintainers file +# +# This file describes who runs the docker/go-units project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "akihirosuda", + "dnephin", + "thajeztah", + "vdemeester", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + + [people.akihirosuda] + Name = "Akihiro Suda" + Email = "akihiro.suda.cz@hco.ntt.co.jp" + GitHub = "AkihiroSuda" + + [people.dnephin] + Name = "Daniel Nephin" + Email = "dnephin@gmail.com" + GitHub = "dnephin" + + [people.thajeztah] + Name = "Sebastiaan van Stijn" + Email = "github@gone.nl" + GitHub = "thaJeztah" + + [people.vdemeester] + Name = "Vincent Demeester" + Email = "vincent@sbr.pm" + GitHub = "vdemeester" \ No newline at end of file diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md new file mode 100644 index 0000000000..4f70a4e134 --- /dev/null +++ b/vendor/github.com/docker/go-units/README.md @@ -0,0 +1,16 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. + +go-units is licensed under the Apache License, Version 2.0. +See [LICENSE](LICENSE) for the full text of the license. diff --git a/vendor/github.com/docker/go-units/circle.yml b/vendor/github.com/docker/go-units/circle.yml new file mode 100644 index 0000000000..af9d605529 --- /dev/null +++ b/vendor/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get golang.org/x/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... | tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/docker/go-units/duration.go b/vendor/github.com/docker/go-units/duration.go new file mode 100644 index 0000000000..48dd8744d4 --- /dev/null +++ b/vendor/github.com/docker/go-units/duration.go @@ -0,0 +1,35 @@ +// Package units provides helper function to parse and print size and time units +// in human-readable format. +package units + +import ( + "fmt" + "time" +) + +// HumanDuration returns a human-readable approximation of a duration +// (eg. "About a minute", "4 hours ago", etc.). 
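+//
+// For illustration (a sketch derived from the branches below):
+//
+//	HumanDuration(90 * time.Second) // "About a minute"
+//	HumanDuration(3 * time.Hour)    // "3 hours"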
+func HumanDuration(d time.Duration) string { + if seconds := int(d.Seconds()); seconds < 1 { + return "Less than a second" + } else if seconds == 1 { + return "1 second" + } else if seconds < 60 { + return fmt.Sprintf("%d seconds", seconds) + } else if minutes := int(d.Minutes()); minutes == 1 { + return "About a minute" + } else if minutes < 60 { + return fmt.Sprintf("%d minutes", minutes) + } else if hours := int(d.Hours() + 0.5); hours == 1 { + return "About an hour" + } else if hours < 48 { + return fmt.Sprintf("%d hours", hours) + } else if hours < 24*7*2 { + return fmt.Sprintf("%d days", hours/24) + } else if hours < 24*30*2 { + return fmt.Sprintf("%d weeks", hours/24/7) + } else if hours < 24*365*2 { + return fmt.Sprintf("%d months", hours/24/30) + } + return fmt.Sprintf("%d years", int(d.Hours())/24/365) +} diff --git a/vendor/github.com/docker/go-units/size.go b/vendor/github.com/docker/go-units/size.go new file mode 100644 index 0000000000..85f6ab0715 --- /dev/null +++ b/vendor/github.com/docker/go-units/size.go @@ -0,0 +1,108 @@ +package units + +import ( + "fmt" + "regexp" + "strconv" + "strings" +) + +// See: http://en.wikipedia.org/wiki/Binary_prefix +const ( + // Decimal + + KB = 1000 + MB = 1000 * KB + GB = 1000 * MB + TB = 1000 * GB + PB = 1000 * TB + + // Binary + + KiB = 1024 + MiB = 1024 * KiB + GiB = 1024 * MiB + TiB = 1024 * GiB + PiB = 1024 * TiB +) + +type unitMap map[string]int64 + +var ( + decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} + binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} + sizeRegex = regexp.MustCompile(`^(\d+(\.\d+)*) ?([kKmMgGtTpP])?[iI]?[bB]?$`) +) + +var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} +var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} + +func getSizeAndUnit(size float64, base float64, _map []string) (float64, string) { + i := 0 + unitsLimit := len(_map) - 1 + for size >= base && i < unitsLimit { + size = size / base + i++ + } + return size, _map[i] +} + +// CustomSize returns a human-readable approximation of a size +// using custom format. +func CustomSize(format string, size float64, base float64, _map []string) string { + size, unit := getSizeAndUnit(size, base, _map) + return fmt.Sprintf(format, size, unit) +} + +// HumanSizeWithPrecision allows the size to be in any precision, +// instead of 4 digit precision used in units.HumanSize. +func HumanSizeWithPrecision(size float64, precision int) string { + size, unit := getSizeAndUnit(size, 1000.0, decimapAbbrs) + return fmt.Sprintf("%.*g%s", precision, size, unit) +} + +// HumanSize returns a human-readable approximation of a size +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). +func HumanSize(size float64) string { + return HumanSizeWithPrecision(size, 4) +} + +// BytesSize returns a human-readable size in bytes, kibibytes, +// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). +func BytesSize(size float64) string { + return CustomSize("%.4g%s", size, 1024.0, binaryAbbrs) +} + +// FromHumanSize returns an integer from a human-readable specification of a +// size using SI standard (eg. "44kB", "17MB"). +func FromHumanSize(size string) (int64, error) { + return parseSize(size, decimalMap) +} + +// RAMInBytes parses a human-readable string representing an amount of RAM +// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and +// returns the number of bytes, or -1 if the string is unparseable. 
+// Units are case-insensitive, and the 'b' suffix is optional. +func RAMInBytes(size string) (int64, error) { + return parseSize(size, binaryMap) +} + +// Parses the human-readable size string into the amount it represents. +func parseSize(sizeStr string, uMap unitMap) (int64, error) { + matches := sizeRegex.FindStringSubmatch(sizeStr) + if len(matches) != 4 { + return -1, fmt.Errorf("invalid size: '%s'", sizeStr) + } + + size, err := strconv.ParseFloat(matches[1], 64) + if err != nil { + return -1, err + } + + unitPrefix := strings.ToLower(matches[3]) + if mul, ok := uMap[unitPrefix]; ok { + size *= float64(mul) + } + + return int64(size), nil +} diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go new file mode 100644 index 0000000000..fca0400cc8 --- /dev/null +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -0,0 +1,123 @@ +package units + +import ( + "fmt" + "strconv" + "strings" +) + +// Ulimit is a human friendly version of Rlimit. +type Ulimit struct { + Name string + Hard int64 + Soft int64 +} + +// Rlimit specifies the resource limits, such as max open files. +type Rlimit struct { + Type int `json:"type,omitempty"` + Hard uint64 `json:"hard,omitempty"` + Soft uint64 `json:"soft,omitempty"` +} + +const ( + // magic numbers for making the syscall + // some of these are defined in the syscall package, but not all. + // Also since Windows client doesn't get access to the syscall package, need to + // define these here + rlimitAs = 9 + rlimitCore = 4 + rlimitCPU = 0 + rlimitData = 2 + rlimitFsize = 1 + rlimitLocks = 10 + rlimitMemlock = 8 + rlimitMsgqueue = 12 + rlimitNice = 13 + rlimitNofile = 7 + rlimitNproc = 6 + rlimitRss = 5 + rlimitRtprio = 14 + rlimitRttime = 15 + rlimitSigpending = 11 + rlimitStack = 3 +) + +var ulimitNameMapping = map[string]int{ + //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. + "core": rlimitCore, + "cpu": rlimitCPU, + "data": rlimitData, + "fsize": rlimitFsize, + "locks": rlimitLocks, + "memlock": rlimitMemlock, + "msgqueue": rlimitMsgqueue, + "nice": rlimitNice, + "nofile": rlimitNofile, + "nproc": rlimitNproc, + "rss": rlimitRss, + "rtprio": rlimitRtprio, + "rttime": rlimitRttime, + "sigpending": rlimitSigpending, + "stack": rlimitStack, +} + +// ParseUlimit parses and returns a Ulimit from the specified string. 
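+//
+// For example (a sketch; "nofile" is one of the limit names in
+// ulimitNameMapping above):
+//
+//	u, err := ParseUlimit("nofile=1024:2048")
+//	// on success: u.Name == "nofile", u.Soft == 1024, u.Hard == 2048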
+func ParseUlimit(val string) (*Ulimit, error) { + parts := strings.SplitN(val, "=", 2) + if len(parts) != 2 { + return nil, fmt.Errorf("invalid ulimit argument: %s", val) + } + + if _, exists := ulimitNameMapping[parts[0]]; !exists { + return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) + } + + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) + } + + if *hard != -1 { + if soft == -1 { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: soft: -1 (unlimited), hard: %d", *hard) + } + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) + } + } + + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil +} + +// GetRlimit returns the RLimit corresponding to Ulimit. +func (u *Ulimit) GetRlimit() (*Rlimit, error) { + t, exists := ulimitNameMapping[u.Name] + if !exists { + return nil, fmt.Errorf("invalid ulimit name %s", u.Name) + } + + return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil +} + +func (u *Ulimit) String() string { + return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) +} diff --git a/vendor/github.com/exponent-io/jsonpath/.gitignore b/vendor/github.com/exponent-io/jsonpath/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/exponent-io/jsonpath/.travis.yml b/vendor/github.com/exponent-io/jsonpath/.travis.yml new file mode 100644 index 0000000000..f4f458a416 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/.travis.yml @@ -0,0 +1,5 @@ +language: go + +go: + - 1.5 + - tip diff --git a/vendor/github.com/exponent-io/jsonpath/LICENSE b/vendor/github.com/exponent-io/jsonpath/LICENSE new file mode 100644 index 0000000000..5419772507 --- /dev/null +++ b/vendor/github.com/exponent-io/jsonpath/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Exponent Labs LLC + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. 
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/vendor/github.com/exponent-io/jsonpath/README.md b/vendor/github.com/exponent-io/jsonpath/README.md
new file mode 100644
index 0000000000..382fb3138c
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/README.md
@@ -0,0 +1,66 @@
+[![GoDoc](https://godoc.org/github.com/exponent-io/jsonpath?status.svg)](https://godoc.org/github.com/exponent-io/jsonpath)
+[![Build Status](https://travis-ci.org/exponent-io/jsonpath.svg?branch=master)](https://travis-ci.org/exponent-io/jsonpath)
+
+# jsonpath
+
+This package extends the [json.Decoder](https://golang.org/pkg/encoding/json/#Decoder) to support navigating a stream of JSON tokens. You should be able to use this extended Decoder in places where a json.Decoder would have been used.
+
+This Decoder has the following enhancements...
+ * The [Scan](https://godoc.org/github.com/exponent-io/jsonpath/#Decoder.Scan) method supports scanning a JSON stream while extracting particular values along the way using [PathActions](https://godoc.org/github.com/exponent-io/jsonpath#PathActions).
+ * The [SeekTo](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.SeekTo) method supports seeking forward in a JSON token stream to a particular path.
+ * The [Path](https://godoc.org/github.com/exponent-io/jsonpath#Decoder.Path) method returns the path of the most recently parsed token.
+ * The [Token](https://godoc.org/github.com/exponent-io/jsonpath#Token) method has been modified to distinguish between strings that are object keys and strings that are values. Object key strings are returned as the [KeyString](https://godoc.org/github.com/exponent-io/jsonpath#KeyString) type rather than a native string.
+
+## Installation
+
+    go get -u github.com/exponent-io/jsonpath
+
+## Example Usage
+
+#### SeekTo
+
+```go
+import "github.com/exponent-io/jsonpath"
+
+var j = []byte(`[
+	{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10}},
+	{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255}}
+]`)
+
+w := jsonpath.NewDecoder(bytes.NewReader(j))
+var v interface{}
+
+w.SeekTo(1, "Point", "G")
+w.Decode(&v) // v is 218
+```
+
+#### Scan with PathActions
+
+```go
+var j = []byte(`{"colors":[
+	{"Space": "YCbCr", "Point": {"Y": 255, "Cb": 0, "Cr": -10, "A": 58}},
+	{"Space": "RGB", "Point": {"R": 98, "G": 218, "B": 255, "A": 231}}
+]}`)
+
+var actions PathActions
+
+// Extract the value at Point.A
+actions.Add(func(d *Decoder) error {
+	var alpha int
+	err := d.Decode(&alpha)
+	fmt.Printf("Alpha: %v\n", alpha)
+	return err
+}, "Point", "A")
+
+w := NewDecoder(bytes.NewReader(j))
+w.SeekTo("colors", 0)
+
+var ok = true
+var err error
+for ok {
+	ok, err = w.Scan(&actions)
+	if err != nil && err != io.EOF {
+		panic(err)
+	}
+}
+```
diff --git a/vendor/github.com/exponent-io/jsonpath/decoder.go b/vendor/github.com/exponent-io/jsonpath/decoder.go
new file mode 100644
index 0000000000..31de46c738
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/decoder.go
@@ -0,0 +1,210 @@
+package jsonpath
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// KeyString is returned from Decoder.Token to represent each key in a JSON object value.
+type KeyString string
+
+// Decoder extends the Go runtime's encoding/json.Decoder to support navigating in a stream of JSON tokens.
+type Decoder struct {
+	json.Decoder
+
+	path    JsonPath
+	context jsonContext
+}
+
+// NewDecoder creates a new instance of the extended JSON Decoder.
+func NewDecoder(r io.Reader) *Decoder {
+	return &Decoder{Decoder: *json.NewDecoder(r)}
+}
+
+// SeekTo causes the Decoder to move forward to a given path in the JSON structure.
+//
+// The path argument must consist of strings or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+//
+// Consider the JSON structure
+//
+//  { "a": [0,"s",12e4,{"b":0,"v":35} ] }
+//
+// SeekTo("a",3,"v") will move to the value referenced by the "a" key in the current object,
+// followed by a move to the 4th value (index 3) in the array, followed by a move to the value at key "v".
+// In this example, a subsequent call to the decoder's Decode() would unmarshal the value 35.
+//
+// SeekTo returns a boolean value indicating whether a match was found.
+//
+// Decoder is intended to be used with a stream of tokens. As a result it navigates forward only.
+func (d *Decoder) SeekTo(path ...interface{}) (bool, error) {
+
+	if len(path) == 0 {
+		return len(d.path) == 0, nil
+	}
+	last := len(path) - 1
+	if i, ok := path[last].(int); ok {
+		path[last] = i - 1
+	}
+
+	for {
+		if d.path.Equal(path) {
+			return true, nil
+		}
+		_, err := d.Token()
+		if err == io.EOF {
+			return false, nil
+		} else if err != nil {
+			return false, err
+		}
+	}
+}
+
+// Decode reads the next JSON-encoded value from its input and stores it in the value pointed to by v. This is
+// equivalent to encoding/json.Decode().
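+//
+// A typical pattern (a sketch; d is a *Decoder created with NewDecoder,
+// reading the example document from the SeekTo comment above):
+//
+//	if ok, _ := d.SeekTo("a", 3, "v"); ok {
+//		var v int
+//		_ = d.Decode(&v) // v == 35
+//	}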
+func (d *Decoder) Decode(v interface{}) error {
+	switch d.context {
+	case objValue:
+		d.context = objKey
+		break
+	case arrValue:
+		d.path.incTop()
+		break
+	}
+	return d.Decoder.Decode(v)
+}
+
+// Path returns a slice of string and/or int values representing the path from the root of the JSON object to the
+// position of the most-recently parsed token.
+func (d *Decoder) Path() JsonPath {
+	p := make(JsonPath, len(d.path))
+	copy(p, d.path)
+	return p
+}
+
+// Token is equivalent to the Token() method on json.Decoder. The primary difference is that it distinguishes
+// between strings that are keys and strings that are values. String tokens that are object keys are returned as a
+// KeyString rather than as a native string.
+func (d *Decoder) Token() (json.Token, error) {
+	t, err := d.Decoder.Token()
+	if err != nil {
+		return t, err
+	}
+
+	if t == nil {
+		switch d.context {
+		case objValue:
+			d.context = objKey
+			break
+		case arrValue:
+			d.path.incTop()
+			break
+		}
+		return t, err
+	}
+
+	switch t := t.(type) {
+	case json.Delim:
+		switch t {
+		case json.Delim('{'):
+			if d.context == arrValue {
+				d.path.incTop()
+			}
+			d.path.push("")
+			d.context = objKey
+			break
+		case json.Delim('}'):
+			d.path.pop()
+			d.context = d.path.inferContext()
+			break
+		case json.Delim('['):
+			if d.context == arrValue {
+				d.path.incTop()
+			}
+			d.path.push(-1)
+			d.context = arrValue
+			break
+		case json.Delim(']'):
+			d.path.pop()
+			d.context = d.path.inferContext()
+			break
+		}
+	case float64, json.Number, bool:
+		switch d.context {
+		case objValue:
+			d.context = objKey
+			break
+		case arrValue:
+			d.path.incTop()
+			break
+		}
+		break
+	case string:
+		switch d.context {
+		case objKey:
+			d.path.nameTop(t)
+			d.context = objValue
+			return KeyString(t), err
+		case objValue:
+			d.context = objKey
+		case arrValue:
+			d.path.incTop()
+		}
+		break
+	}
+
+	return t, err
+}
+
+// Scan moves forward over the JSON stream consuming all the tokens at the current level (current object, current array)
+// invoking each matching PathAction along the way.
+//
+// Scan returns true if there are more contiguous values to scan (for example in an array).
+func (d *Decoder) Scan(ext *PathActions) (bool, error) {
+
+	rootPath := d.Path()
+
+	// If this is an array path, increment the root path in our local copy.
+	if rootPath.inferContext() == arrValue {
+		rootPath.incTop()
+	}
+
+	for {
+		// advance the token position
+		_, err := d.Token()
+		if err != nil {
+			return false, err
+		}
+
+	match:
+		var relPath JsonPath
+
+		// capture the new JSON path
+		path := d.Path()
+
+		if len(path) > len(rootPath) {
+			// capture the path relative to where the scan started
+			relPath = path[len(rootPath):]
+		} else {
+			// if the path is not longer than the root, then we are done with this scan
+			// return boolean flag indicating if there are more items to scan at the same level
+			return d.Decoder.More(), nil
+		}
+
+		// match the relative path against the path actions
+		if node := ext.node.match(relPath); node != nil {
+			if node.action != nil {
+				// we have a match so execute the action
+				err = node.action(d)
+				if err != nil {
+					return d.Decoder.More(), err
+				}
+				// The action may have advanced the decoder. If we are in an array, advancing it further would
+				// skip tokens. So, if we are scanning an array, jump to the top without advancing the token.
+				if d.path.inferContext() == arrValue && d.Decoder.More() {
+					goto match
+				}
+			}
+		}
+	}
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/path.go b/vendor/github.com/exponent-io/jsonpath/path.go
new file mode 100644
index 0000000000..d7db2ad336
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/path.go
@@ -0,0 +1,67 @@
+// Extends the Go runtime's json.Decoder enabling navigation of a stream of json tokens.
+package jsonpath
+
+import "fmt"
+
+type jsonContext int
+
+const (
+	none jsonContext = iota
+	objKey
+	objValue
+	arrValue
+)
+
+// AnyIndex can be used in a pattern to match any array index.
+const AnyIndex = -2
+
+// JsonPath is a slice of strings and/or integers. Each string specifies a JSON object key, and
+// each integer specifies an index into a JSON array.
+type JsonPath []interface{}
+
+func (p *JsonPath) push(n interface{}) { *p = append(*p, n) }
+func (p *JsonPath) pop()               { *p = (*p)[:len(*p)-1] }
+
+// increment the index at the top of the stack (must be an array index)
+func (p *JsonPath) incTop() { (*p)[len(*p)-1] = (*p)[len(*p)-1].(int) + 1 }
+
+// name the key at the top of the stack (must be an object key)
+func (p *JsonPath) nameTop(n string) { (*p)[len(*p)-1] = n }
+
+// infer the context from the item at the top of the stack
+func (p *JsonPath) inferContext() jsonContext {
+	if len(*p) == 0 {
+		return none
+	}
+	t := (*p)[len(*p)-1]
+	switch t.(type) {
+	case string:
+		return objKey
+	case int:
+		return arrValue
+	default:
+		panic(fmt.Sprintf("Invalid stack type %T", t))
+	}
+}
+
+// Equal tests for equality between two JsonPath types.
+func (p *JsonPath) Equal(o JsonPath) bool {
+	if len(*p) != len(o) {
+		return false
+	}
+	for i, v := range *p {
+		if v != o[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func (p *JsonPath) HasPrefix(o JsonPath) bool {
+	for i, v := range o {
+		if v != (*p)[i] {
+			return false
+		}
+	}
+	return true
+}
diff --git a/vendor/github.com/exponent-io/jsonpath/pathaction.go b/vendor/github.com/exponent-io/jsonpath/pathaction.go
new file mode 100644
index 0000000000..497ed686ca
--- /dev/null
+++ b/vendor/github.com/exponent-io/jsonpath/pathaction.go
@@ -0,0 +1,61 @@
+package jsonpath
+
+// pathNode is used to construct a trie of paths to be matched
+type pathNode struct {
+	matchOn    interface{} // string, or integer
+	childNodes []pathNode
+	action     DecodeAction
+}
+
+// match climbs the trie to find a node that matches the given JSON path.
+func (n *pathNode) match(path JsonPath) *pathNode {
+	var node *pathNode = n
+	for _, ps := range path {
+		found := false
+		for i, n := range node.childNodes {
+			if n.matchOn == ps {
+				node = &node.childNodes[i]
+				found = true
+				break
+			} else if _, ok := ps.(int); ok && n.matchOn == AnyIndex {
+				node = &node.childNodes[i]
+				found = true
+				break
+			}
+		}
+		if !found {
+			return nil
+		}
+	}
+	return node
+}
+
+// PathActions represents a collection of DecodeAction functions that should be called at certain path positions
+// when scanning the JSON stream. PathActions can be created once and used many times in one or more JSON streams.
+type PathActions struct {
+	node pathNode
+}
+
+// DecodeAction handlers are called by the Decoder when scanning objects. See PathActions.Add for more detail.
+type DecodeAction func(d *Decoder) error
+
+// Add specifies an action to call on the Decoder when the specified path is encountered.
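+//
+// For example (a sketch mirroring the README: decode every "Point"/"A"
+// value seen while scanning):
+//
+//	var actions PathActions
+//	actions.Add(func(d *Decoder) error {
+//		var a int
+//		return d.Decode(&a)
+//	}, "Point", "A")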
+func (je *PathActions) Add(action DecodeAction, path ...interface{}) { + + var node *pathNode = &je.node + for _, ps := range path { + found := false + for i, n := range node.childNodes { + if n.matchOn == ps { + node = &node.childNodes[i] + found = true + break + } + } + if !found { + node.childNodes = append(node.childNodes, pathNode{matchOn: ps}) + node = &node.childNodes[len(node.childNodes)-1] + } + } + node.action = action +} diff --git a/vendor/github.com/gobwas/glob/.gitignore b/vendor/github.com/gobwas/glob/.gitignore new file mode 100644 index 0000000000..b4ae623be5 --- /dev/null +++ b/vendor/github.com/gobwas/glob/.gitignore @@ -0,0 +1,8 @@ +glob.iml +.idea +*.cpu +*.mem +*.test +*.dot +*.png +*.svg diff --git a/vendor/github.com/gobwas/glob/.travis.yml b/vendor/github.com/gobwas/glob/.travis.yml new file mode 100644 index 0000000000..e8a276826c --- /dev/null +++ b/vendor/github.com/gobwas/glob/.travis.yml @@ -0,0 +1,9 @@ +sudo: false + +language: go + +go: + - 1.5.3 + +script: + - go test -v ./... diff --git a/vendor/github.com/gobwas/glob/LICENSE b/vendor/github.com/gobwas/glob/LICENSE new file mode 100644 index 0000000000..9d4735cad9 --- /dev/null +++ b/vendor/github.com/gobwas/glob/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Sergey Kamardin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/bench.sh b/vendor/github.com/gobwas/glob/bench.sh new file mode 100644 index 0000000000..804cf22e64 --- /dev/null +++ b/vendor/github.com/gobwas/glob/bench.sh @@ -0,0 +1,26 @@ +#! /bin/bash + +bench() { + filename="/tmp/$1-$2.bench" + if test -e "${filename}"; + then + echo "Already exists ${filename}" + else + backup=`git rev-parse --abbrev-ref HEAD` + git checkout $1 + echo -n "Creating ${filename}... " + go test ./... 
-run=NONE -bench=$2 > "${filename}" -benchmem + echo "OK" + git checkout ${backup} + sleep 5 + fi +} + + +to=$1 +current=`git rev-parse --abbrev-ref HEAD` + +bench ${to} $2 +bench ${current} $2 + +benchcmp $3 "/tmp/${to}-$2.bench" "/tmp/${current}-$2.bench" diff --git a/vendor/github.com/gobwas/glob/compiler/compiler.go b/vendor/github.com/gobwas/glob/compiler/compiler.go new file mode 100644 index 0000000000..02e7de80a0 --- /dev/null +++ b/vendor/github.com/gobwas/glob/compiler/compiler.go @@ -0,0 +1,525 @@ +package compiler + +// TODO use constructor with all matchers, and to their structs private +// TODO glue multiple Text nodes (like after QuoteMeta) + +import ( + "fmt" + "reflect" + + "github.com/gobwas/glob/match" + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/util/runes" +) + +func optimizeMatcher(matcher match.Matcher) match.Matcher { + switch m := matcher.(type) { + + case match.Any: + if len(m.Separators) == 0 { + return match.NewSuper() + } + + case match.AnyOf: + if len(m.Matchers) == 1 { + return m.Matchers[0] + } + + return m + + case match.List: + if m.Not == false && len(m.List) == 1 { + return match.NewText(string(m.List)) + } + + return m + + case match.BTree: + m.Left = optimizeMatcher(m.Left) + m.Right = optimizeMatcher(m.Right) + + r, ok := m.Value.(match.Text) + if !ok { + return m + } + + var ( + leftNil = m.Left == nil + rightNil = m.Right == nil + ) + if leftNil && rightNil { + return match.NewText(r.Str) + } + + _, leftSuper := m.Left.(match.Super) + lp, leftPrefix := m.Left.(match.Prefix) + la, leftAny := m.Left.(match.Any) + + _, rightSuper := m.Right.(match.Super) + rs, rightSuffix := m.Right.(match.Suffix) + ra, rightAny := m.Right.(match.Any) + + switch { + case leftSuper && rightSuper: + return match.NewContains(r.Str, false) + + case leftSuper && rightNil: + return match.NewSuffix(r.Str) + + case rightSuper && leftNil: + return match.NewPrefix(r.Str) + + case leftNil && rightSuffix: + return match.NewPrefixSuffix(r.Str, rs.Suffix) + + case rightNil && leftPrefix: + return match.NewPrefixSuffix(lp.Prefix, r.Str) + + case rightNil && leftAny: + return match.NewSuffixAny(r.Str, la.Separators) + + case leftNil && rightAny: + return match.NewPrefixAny(r.Str, ra.Separators) + } + + return m + } + + return matcher +} + +func compileMatchers(matchers []match.Matcher) (match.Matcher, error) { + if len(matchers) == 0 { + return nil, fmt.Errorf("compile error: need at least one matcher") + } + if len(matchers) == 1 { + return matchers[0], nil + } + if m := glueMatchers(matchers); m != nil { + return m, nil + } + + idx := -1 + maxLen := -1 + var val match.Matcher + for i, matcher := range matchers { + if l := matcher.Len(); l != -1 && l >= maxLen { + maxLen = l + idx = i + val = matcher + } + } + + if val == nil { // not found matcher with static length + r, err := compileMatchers(matchers[1:]) + if err != nil { + return nil, err + } + return match.NewBTree(matchers[0], nil, r), nil + } + + left := matchers[:idx] + var right []match.Matcher + if len(matchers) > idx+1 { + right = matchers[idx+1:] + } + + var l, r match.Matcher + var err error + if len(left) > 0 { + l, err = compileMatchers(left) + if err != nil { + return nil, err + } + } + + if len(right) > 0 { + r, err = compileMatchers(right) + if err != nil { + return nil, err + } + } + + return match.NewBTree(val, l, r), nil +} + +func glueMatchers(matchers []match.Matcher) match.Matcher { + if m := glueMatchersAsEvery(matchers); m != nil { + return 
m + } + if m := glueMatchersAsRow(matchers); m != nil { + return m + } + return nil +} + +func glueMatchersAsRow(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + c []match.Matcher + l int + ) + for _, matcher := range matchers { + if ml := matcher.Len(); ml == -1 { + return nil + } else { + c = append(c, matcher) + l += ml + } + } + return match.NewRow(l, c...) +} + +func glueMatchersAsEvery(matchers []match.Matcher) match.Matcher { + if len(matchers) <= 1 { + return nil + } + + var ( + hasAny bool + hasSuper bool + hasSingle bool + min int + separator []rune + ) + + for i, matcher := range matchers { + var sep []rune + + switch m := matcher.(type) { + case match.Super: + sep = []rune{} + hasSuper = true + + case match.Any: + sep = m.Separators + hasAny = true + + case match.Single: + sep = m.Separators + hasSingle = true + min++ + + case match.List: + if !m.Not { + return nil + } + sep = m.List + hasSingle = true + min++ + + default: + return nil + } + + // initialize + if i == 0 { + separator = sep + } + + if runes.Equal(sep, separator) { + continue + } + + return nil + } + + if hasSuper && !hasAny && !hasSingle { + return match.NewSuper() + } + + if hasAny && !hasSuper && !hasSingle { + return match.NewAny(separator) + } + + if (hasAny || hasSuper) && min > 0 && len(separator) == 0 { + return match.NewMin(min) + } + + every := match.NewEveryOf() + + if min > 0 { + every.Add(match.NewMin(min)) + + if !hasAny && !hasSuper { + every.Add(match.NewMax(min)) + } + } + + if len(separator) > 0 { + every.Add(match.NewContains(string(separator), true)) + } + + return every +} + +func minimizeMatchers(matchers []match.Matcher) []match.Matcher { + var done match.Matcher + var left, right, count int + + for l := 0; l < len(matchers); l++ { + for r := len(matchers); r > l; r-- { + if glued := glueMatchers(matchers[l:r]); glued != nil { + var swap bool + + if done == nil { + swap = true + } else { + cl, gl := done.Len(), glued.Len() + swap = cl > -1 && gl > -1 && gl > cl + swap = swap || count < r-l + } + + if swap { + done = glued + left = l + right = r + count = r - l + } + } + } + } + + if done == nil { + return matchers + } + + next := append(append([]match.Matcher{}, matchers[:left]...), done) + if right < len(matchers) { + next = append(next, matchers[right:]...) 
+	}
+
+	if len(next) == len(matchers) {
+		return next
+	}
+
+	return minimizeMatchers(next)
+}
+
+// minimizeTree tries to apply some heuristics to minimize number of nodes in given tree
+func minimizeTree(tree *ast.Node) *ast.Node {
+	switch tree.Kind {
+	case ast.KindAnyOf:
+		return minimizeTreeAnyOf(tree)
+	default:
+		return nil
+	}
+}
+
+// minimizeTreeAnyOf tries to find common children of given node of AnyOf pattern.
+// It searches for common children from left and from right;
+// if any common children are found, it returns a new optimized ast tree,
+// else it returns nil.
+func minimizeTreeAnyOf(tree *ast.Node) *ast.Node {
+	if !areOfSameKind(tree.Children, ast.KindPattern) {
+		return nil
+	}
+
+	commonLeft, commonRight := commonChildren(tree.Children)
+	commonLeftCount, commonRightCount := len(commonLeft), len(commonRight)
+	if commonLeftCount == 0 && commonRightCount == 0 { // there are no common parts
+		return nil
+	}
+
+	var result []*ast.Node
+	if commonLeftCount > 0 {
+		result = append(result, ast.NewNode(ast.KindPattern, nil, commonLeft...))
+	}
+
+	var anyOf []*ast.Node
+	for _, child := range tree.Children {
+		reuse := child.Children[commonLeftCount : len(child.Children)-commonRightCount]
+		var node *ast.Node
+		if len(reuse) == 0 {
+			// this pattern is completely reduced by commonLeft and commonRight patterns
+			// so it becomes nothing
+			node = ast.NewNode(ast.KindNothing, nil)
+		} else {
+			node = ast.NewNode(ast.KindPattern, nil, reuse...)
+		}
+		anyOf = appendIfUnique(anyOf, node)
+	}
+	switch {
+	case len(anyOf) == 1 && anyOf[0].Kind != ast.KindNothing:
+		result = append(result, anyOf[0])
+	case len(anyOf) > 1:
+		result = append(result, ast.NewNode(ast.KindAnyOf, nil, anyOf...))
+	}
+
+	if commonRightCount > 0 {
+		result = append(result, ast.NewNode(ast.KindPattern, nil, commonRight...))
+	}
+
+	return ast.NewNode(ast.KindPattern, nil, result...)
+} + +func commonChildren(nodes []*ast.Node) (commonLeft, commonRight []*ast.Node) { + if len(nodes) <= 1 { + return + } + + // find node that has least number of children + idx := leastChildren(nodes) + if idx == -1 { + return + } + tree := nodes[idx] + treeLength := len(tree.Children) + + // allocate max able size for rightCommon slice + // to get ability insert elements in reverse order (from end to start) + // without sorting + commonRight = make([]*ast.Node, treeLength) + lastRight := treeLength // will use this to get results as commonRight[lastRight:] + + var ( + breakLeft bool + breakRight bool + commonTotal int + ) + for i, j := 0, treeLength-1; commonTotal < treeLength && j >= 0 && !(breakLeft && breakRight); i, j = i+1, j-1 { + treeLeft := tree.Children[i] + treeRight := tree.Children[j] + + for k := 0; k < len(nodes) && !(breakLeft && breakRight); k++ { + // skip least children node + if k == idx { + continue + } + + restLeft := nodes[k].Children[i] + restRight := nodes[k].Children[j+len(nodes[k].Children)-treeLength] + + breakLeft = breakLeft || !treeLeft.Equal(restLeft) + + // disable searching for right common parts, if left part is already overlapping + breakRight = breakRight || (!breakLeft && j <= i) + breakRight = breakRight || !treeRight.Equal(restRight) + } + + if !breakLeft { + commonTotal++ + commonLeft = append(commonLeft, treeLeft) + } + if !breakRight { + commonTotal++ + lastRight = j + commonRight[j] = treeRight + } + } + + commonRight = commonRight[lastRight:] + + return +} + +func appendIfUnique(target []*ast.Node, val *ast.Node) []*ast.Node { + for _, n := range target { + if reflect.DeepEqual(n, val) { + return target + } + } + return append(target, val) +} + +func areOfSameKind(nodes []*ast.Node, kind ast.Kind) bool { + for _, n := range nodes { + if n.Kind != kind { + return false + } + } + return true +} + +func leastChildren(nodes []*ast.Node) int { + min := -1 + idx := -1 + for i, n := range nodes { + if idx == -1 || (len(n.Children) < min) { + min = len(n.Children) + idx = i + } + } + return idx +} + +func compileTreeChildren(tree *ast.Node, sep []rune) ([]match.Matcher, error) { + var matchers []match.Matcher + for _, desc := range tree.Children { + m, err := compile(desc, sep) + if err != nil { + return nil, err + } + matchers = append(matchers, optimizeMatcher(m)) + } + return matchers, nil +} + +func compile(tree *ast.Node, sep []rune) (m match.Matcher, err error) { + switch tree.Kind { + case ast.KindAnyOf: + // todo this could be faster on pattern_alternatives_combine_lite (see glob_test.go) + if n := minimizeTree(tree); n != nil { + return compile(n, sep) + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + return match.NewAnyOf(matchers...), nil + + case ast.KindPattern: + if len(tree.Children) == 0 { + return match.NewNothing(), nil + } + matchers, err := compileTreeChildren(tree, sep) + if err != nil { + return nil, err + } + m, err = compileMatchers(minimizeMatchers(matchers)) + if err != nil { + return nil, err + } + + case ast.KindAny: + m = match.NewAny(sep) + + case ast.KindSuper: + m = match.NewSuper() + + case ast.KindSingle: + m = match.NewSingle(sep) + + case ast.KindNothing: + m = match.NewNothing() + + case ast.KindList: + l := tree.Value.(ast.List) + m = match.NewList([]rune(l.Chars), l.Not) + + case ast.KindRange: + r := tree.Value.(ast.Range) + m = match.NewRange(r.Lo, r.Hi, r.Not) + + case ast.KindText: + t := tree.Value.(ast.Text) + m = match.NewText(t.Text) + + default: + return nil, 
fmt.Errorf("could not compile tree: unknown node type") + } + + return optimizeMatcher(m), nil +} + +func Compile(tree *ast.Node, sep []rune) (match.Matcher, error) { + m, err := compile(tree, sep) + if err != nil { + return nil, err + } + + return m, nil +} diff --git a/vendor/github.com/gobwas/glob/glob.go b/vendor/github.com/gobwas/glob/glob.go new file mode 100644 index 0000000000..2afde343af --- /dev/null +++ b/vendor/github.com/gobwas/glob/glob.go @@ -0,0 +1,80 @@ +package glob + +import ( + "github.com/gobwas/glob/compiler" + "github.com/gobwas/glob/syntax" +) + +// Glob represents compiled glob pattern. +type Glob interface { + Match(string) bool +} + +// Compile creates Glob for given pattern and strings (if any present after pattern) as separators. +// The pattern syntax is: +// +// pattern: +// { term } +// +// term: +// `*` matches any sequence of non-separator characters +// `**` matches any sequence of characters +// `?` matches any single non-separator character +// `[` [ `!` ] { character-range } `]` +// character class (must be non-empty) +// `{` pattern-list `}` +// pattern alternatives +// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) +// `\` c matches character c +// +// character-range: +// c matches character c (c != `\\`, `-`, `]`) +// `\` c matches character c +// lo `-` hi matches character c for lo <= c <= hi +// +// pattern-list: +// pattern { `,` pattern } +// comma-separated (without spaces) patterns +// +func Compile(pattern string, separators ...rune) (Glob, error) { + ast, err := syntax.Parse(pattern) + if err != nil { + return nil, err + } + + matcher, err := compiler.Compile(ast, separators) + if err != nil { + return nil, err + } + + return matcher, nil +} + +// MustCompile is the same as Compile, except that if Compile returns error, this will panic +func MustCompile(pattern string, separators ...rune) Glob { + g, err := Compile(pattern, separators...) + if err != nil { + panic(err) + } + + return g +} + +// QuoteMeta returns a string that quotes all glob pattern meta characters +// inside the argument text; For example, QuoteMeta(`{foo*}`) returns `\[foo\*\]`. 
+func QuoteMeta(s string) string {
+	b := make([]byte, 2*len(s))
+
+	// a byte loop is correct because all meta characters are ASCII
+	j := 0
+	for i := 0; i < len(s); i++ {
+		if syntax.Special(s[i]) {
+			b[j] = '\\'
+			j++
+		}
+		b[j] = s[i]
+		j++
+	}
+
+	return string(b[0:j])
+}
diff --git a/vendor/github.com/gobwas/glob/match/any.go b/vendor/github.com/gobwas/glob/match/any.go
new file mode 100644
index 0000000000..514a9a5c45
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any.go
@@ -0,0 +1,45 @@
+package match
+
+import (
+	"fmt"
+	"github.com/gobwas/glob/util/strings"
+)
+
+type Any struct {
+	Separators []rune
+}
+
+func NewAny(s []rune) Any {
+	return Any{s}
+}
+
+func (self Any) Match(s string) bool {
+	return strings.IndexAnyRunes(s, self.Separators) == -1
+}
+
+func (self Any) Index(s string) (int, []int) {
+	found := strings.IndexAnyRunes(s, self.Separators)
+	switch found {
+	case -1:
+	case 0:
+		return 0, segments0
+	default:
+		s = s[:found]
+	}
+
+	segments := acquireSegments(len(s))
+	for i := range s {
+		segments = append(segments, i)
+	}
+	segments = append(segments, len(s))
+
+	return 0, segments
+}
+
+func (self Any) Len() int {
+	return lenNo
+}
+
+func (self Any) String() string {
+	return fmt.Sprintf("<any:![%s]>", string(self.Separators))
+}
diff --git a/vendor/github.com/gobwas/glob/match/any_of.go b/vendor/github.com/gobwas/glob/match/any_of.go
new file mode 100644
index 0000000000..8e65356cdc
--- /dev/null
+++ b/vendor/github.com/gobwas/glob/match/any_of.go
@@ -0,0 +1,82 @@
+package match
+
+import "fmt"
+
+type AnyOf struct {
+	Matchers Matchers
+}
+
+func NewAnyOf(m ...Matcher) AnyOf {
+	return AnyOf{Matchers(m)}
+}
+
+func (self *AnyOf) Add(m Matcher) error {
+	self.Matchers = append(self.Matchers, m)
+	return nil
+}
+
+func (self AnyOf) Match(s string) bool {
+	for _, m := range self.Matchers {
+		if m.Match(s) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func (self AnyOf) Index(s string) (int, []int) {
+	index := -1
+
+	segments := acquireSegments(len(s))
+	for _, m := range self.Matchers {
+		idx, seg := m.Index(s)
+		if idx == -1 {
+			continue
+		}
+
+		if index == -1 || idx < index {
+			index = idx
+			segments = append(segments[:0], seg...)
+ continue + } + + if idx > index { + continue + } + + // here idx == index + segments = appendMerge(segments, seg) + } + + if index == -1 { + releaseSegments(segments) + return -1, nil + } + + return index, segments +} + +func (self AnyOf) Len() (l int) { + l = -1 + for _, m := range self.Matchers { + ml := m.Len() + switch { + case l == -1: + l = ml + continue + + case ml == -1: + return -1 + + case l != ml: + return -1 + } + } + + return +} + +func (self AnyOf) String() string { + return fmt.Sprintf("<any_of:[%s]>", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/btree.go b/vendor/github.com/gobwas/glob/match/btree.go new file mode 100644 index 0000000000..a8130e93ea --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/btree.go @@ -0,0 +1,146 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type BTree struct { + Value Matcher + Left Matcher + Right Matcher + ValueLengthRunes int + LeftLengthRunes int + RightLengthRunes int + LengthRunes int +} + +func NewBTree(Value, Left, Right Matcher) (tree BTree) { + tree.Value = Value + tree.Left = Left + tree.Right = Right + + lenOk := true + if tree.ValueLengthRunes = Value.Len(); tree.ValueLengthRunes == -1 { + lenOk = false + } + + if Left != nil { + if tree.LeftLengthRunes = Left.Len(); tree.LeftLengthRunes == -1 { + lenOk = false + } + } + + if Right != nil { + if tree.RightLengthRunes = Right.Len(); tree.RightLengthRunes == -1 { + lenOk = false + } + } + + if lenOk { + tree.LengthRunes = tree.LeftLengthRunes + tree.ValueLengthRunes + tree.RightLengthRunes + } else { + tree.LengthRunes = -1 + } + + return tree +} + +func (self BTree) Len() int { + return self.LengthRunes +} + +// todo? +func (self BTree) Index(s string) (int, []int) { + return -1, nil +} + +func (self BTree) Match(s string) bool { + inputLen := len(s) + + // self.LengthRunes, self.LeftLengthRunes and self.RightLengthRunes hold the rune count of each part; + // here we work with byte lengths for better optimization, + // but these checks still hold, since the minimum length of a 1-rune string is 1 byte.
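+ // In short (a sketch of the strategy below): find each occurrence of the + // fixed Value part, then match Left against the text before it and Right + // against the text after it, trying every candidate segment before giving up.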
+ if self.LengthRunes != -1 && self.LengthRunes > inputLen { + return false + } + + // try to cut unnecessary parts + // by knowledge of length of right and left part + var offset, limit int + if self.LeftLengthRunes >= 0 { + offset = self.LeftLengthRunes + } + if self.RightLengthRunes >= 0 { + limit = inputLen - self.RightLengthRunes + } else { + limit = inputLen + } + + for offset < limit { + // search for matching part in substring + index, segments := self.Value.Index(s[offset:limit]) + if index == -1 { + releaseSegments(segments) + return false + } + + l := s[:offset+index] + var left bool + if self.Left != nil { + left = self.Left.Match(l) + } else { + left = l == "" + } + + if left { + for i := len(segments) - 1; i >= 0; i-- { + length := segments[i] + + var right bool + var r string + // if there is no string for the right branch + if inputLen <= offset+index+length { + r = "" + } else { + r = s[offset+index+length:] + } + + if self.Right != nil { + right = self.Right.Match(r) + } else { + right = r == "" + } + + if right { + releaseSegments(segments) + return true + } + } + } + + _, step := utf8.DecodeRuneInString(s[offset+index:]) + offset += index + step + + releaseSegments(segments) + } + + return false +} + +func (self BTree) String() string { + const n string = "<nil>" + var l, r string + if self.Left == nil { + l = n + } else { + l = self.Left.String() + } + if self.Right == nil { + r = n + } else { + r = self.Right.String() + } + + return fmt.Sprintf("<btree:[%s<%s>%s]>", l, self.Value, r) +} diff --git a/vendor/github.com/gobwas/glob/match/contains.go b/vendor/github.com/gobwas/glob/match/contains.go new file mode 100644 index 0000000000..0998e95b0e --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/contains.go @@ -0,0 +1,58 @@ +package match + +import ( + "fmt" + "strings" +) + +type Contains struct { + Needle string + Not bool +} + +func NewContains(needle string, not bool) Contains { + return Contains{needle, not} +} + +func (self Contains) Match(s string) bool { + return strings.Contains(s, self.Needle) != self.Not +} + +func (self Contains) Index(s string) (int, []int) { + var offset int + + idx := strings.Index(s, self.Needle) + + if !self.Not { + if idx == -1 { + return -1, nil + } + + offset = idx + len(self.Needle) + if len(s) <= offset { + return 0, []int{offset} + } + s = s[offset:] + } else if idx != -1 { + s = s[:idx] + } + + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, offset+i) + } + + return 0, append(segments, offset+len(s)) +} + +func (self Contains) Len() int { + return lenNo +} + +func (self Contains) String() string { + var not string + if self.Not { + not = "!"
+ } + return fmt.Sprintf("<contains:%s[%s]>", not, self.Needle) +} diff --git a/vendor/github.com/gobwas/glob/match/every_of.go b/vendor/github.com/gobwas/glob/match/every_of.go new file mode 100644 index 0000000000..7c968ee368 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/every_of.go @@ -0,0 +1,99 @@ +package match + +import ( + "fmt" +) + +type EveryOf struct { + Matchers Matchers +} + +func NewEveryOf(m ...Matcher) EveryOf { + return EveryOf{Matchers(m)} +} + +func (self *EveryOf) Add(m Matcher) error { + self.Matchers = append(self.Matchers, m) + return nil +} + +func (self EveryOf) Len() (l int) { + for _, m := range self.Matchers { + if ml := m.Len(); l > 0 { + l += ml + } else { + return -1 + } + } + + return +} + +func (self EveryOf) Index(s string) (int, []int) { + var index int + var offset int + + // make `in` with cap as len(s), + // since that is the maximum size of the output segment values + next := acquireSegments(len(s)) + current := acquireSegments(len(s)) + + sub := s + for i, m := range self.Matchers { + idx, seg := m.Index(sub) + if idx == -1 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + if i == 0 { + // we use copy here instead of `current = seg` + // since seg is a slice from the reusable buffer `in` + // and could be overwritten in the next iteration + current = append(current, seg...) + } else { + // clear the next + next = next[:0] + + delta := index - (idx + offset) + for _, ex := range current { + for _, n := range seg { + if ex+delta == n { + next = append(next, n) + } + } + } + + if len(next) == 0 { + releaseSegments(next) + releaseSegments(current) + return -1, nil + } + + current = append(current[:0], next...) + } + + index = idx + offset + sub = s[index:] + offset += idx + } + + releaseSegments(next) + + return index, current +} + +func (self EveryOf) Match(s string) bool { + for _, m := range self.Matchers { + if !m.Match(s) { + return false + } + } + + return true +} + +func (self EveryOf) String() string { + return fmt.Sprintf("<every_of:[%s]>", self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/list.go b/vendor/github.com/gobwas/glob/match/list.go new file mode 100644 index 0000000000..7fd763ecd8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/list.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +type List struct { + List []rune + Not bool +} + +func NewList(list []rune, not bool) List { + return List{list, not} +} + +func (self List) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inList := runes.IndexRune(self.List, r) != -1 + return inList == !self.Not +} + +func (self List) Len() int { + return lenOne +} + +func (self List) Index(s string) (int, []int) { + for i, r := range s { + if self.Not == (runes.IndexRune(self.List, r) == -1) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self List) String() string { + var not string + if self.Not { + not = "!"
+ } + + return fmt.Sprintf("<list:%s[%s]>", not, string(self.List)) +} diff --git a/vendor/github.com/gobwas/glob/match/match.go b/vendor/github.com/gobwas/glob/match/match.go new file mode 100644 index 0000000000..f80e007fb8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/match.go @@ -0,0 +1,81 @@ +package match + +// todo common table of rune's length + +import ( + "fmt" + "strings" +) + +const lenOne = 1 +const lenZero = 0 +const lenNo = -1 + +type Matcher interface { + Match(string) bool + Index(string) (int, []int) + Len() int + String() string +} + +type Matchers []Matcher + +func (m Matchers) String() string { + var s []string + for _, matcher := range m { + s = append(s, fmt.Sprint(matcher)) + } + + return fmt.Sprintf("%s", strings.Join(s, ",")) +} + +// appendMerge merges and sorts given already SORTED and UNIQUE segments. +func appendMerge(target, sub []int) []int { + lt, ls := len(target), len(sub) + out := make([]int, 0, lt+ls) + + for x, y := 0, 0; x < lt || y < ls; { + if x >= lt { + out = append(out, sub[y:]...) + break + } + + if y >= ls { + out = append(out, target[x:]...) + break + } + + xValue := target[x] + yValue := sub[y] + + switch { + + case xValue == yValue: + out = append(out, xValue) + x++ + y++ + + case xValue < yValue: + out = append(out, xValue) + x++ + + case yValue < xValue: + out = append(out, yValue) + y++ + + } + } + + target = append(target[:0], out...) + + return target +} + +func reverseSegments(input []int) { + l := len(input) + m := l / 2 + + for i := 0; i < m; i++ { + input[i], input[l-i-1] = input[l-i-1], input[i] + } +} diff --git a/vendor/github.com/gobwas/glob/match/max.go b/vendor/github.com/gobwas/glob/match/max.go new file mode 100644 index 0000000000..d72f69efff --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/max.go @@ -0,0 +1,49 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Max struct { + Limit int +} + +func NewMax(l int) Max { + return Max{l} +} + +func (self Max) Match(s string) bool { + var l int + for range s { + l += 1 + if l > self.Limit { + return false + } + } + + return true +} + +func (self Max) Index(s string) (int, []int) { + segments := acquireSegments(self.Limit + 1) + segments = append(segments, 0) + var count int + for i, r := range s { + count++ + if count > self.Limit { + break + } + segments = append(segments, i+utf8.RuneLen(r)) + } + + return 0, segments +} + +func (self Max) Len() int { + return lenNo +} + +func (self Max) String() string { + return fmt.Sprintf("<max:%d>", self.Limit) +} diff --git a/vendor/github.com/gobwas/glob/match/min.go b/vendor/github.com/gobwas/glob/match/min.go new file mode 100644 index 0000000000..db57ac8eb4 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/min.go @@ -0,0 +1,57 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Min struct { + Limit int +} + +func NewMin(l int) Min { + return Min{l} +} + +func (self Min) Match(s string) bool { + var l int + for range s { + l += 1 + if l >= self.Limit { + return true + } + } + + return false +} + +func (self Min) Index(s string) (int, []int) { + var count int + + c := len(s) - self.Limit + 1 + if c <= 0 { + return -1, nil + } + + segments := acquireSegments(c) + for i, r := range s { + count++ + if count >= self.Limit { + segments = append(segments, i+utf8.RuneLen(r)) + } + } + + if len(segments) == 0 { + return -1, nil + } + + return 0, segments +} + +func (self Min) Len() int { + return lenNo +} + +func (self Min) String() string { + 
return fmt.Sprintf("", self.Limit) +} diff --git a/vendor/github.com/gobwas/glob/match/nothing.go b/vendor/github.com/gobwas/glob/match/nothing.go new file mode 100644 index 0000000000..0d4ecd36b8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/nothing.go @@ -0,0 +1,27 @@ +package match + +import ( + "fmt" +) + +type Nothing struct{} + +func NewNothing() Nothing { + return Nothing{} +} + +func (self Nothing) Match(s string) bool { + return len(s) == 0 +} + +func (self Nothing) Index(s string) (int, []int) { + return 0, segments0 +} + +func (self Nothing) Len() int { + return lenZero +} + +func (self Nothing) String() string { + return fmt.Sprintf("") +} diff --git a/vendor/github.com/gobwas/glob/match/prefix.go b/vendor/github.com/gobwas/glob/match/prefix.go new file mode 100644 index 0000000000..a7347250e8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix.go @@ -0,0 +1,50 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +type Prefix struct { + Prefix string +} + +func NewPrefix(p string) Prefix { + return Prefix{p} +} + +func (self Prefix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + length := len(self.Prefix) + var sub string + if len(s) > idx+length { + sub = s[idx+length:] + } else { + sub = "" + } + + segments := acquireSegments(len(sub) + 1) + segments = append(segments, length) + for i, r := range sub { + segments = append(segments, length+i+utf8.RuneLen(r)) + } + + return idx, segments +} + +func (self Prefix) Len() int { + return lenNo +} + +func (self Prefix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) +} + +func (self Prefix) String() string { + return fmt.Sprintf("", self.Prefix) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_any.go b/vendor/github.com/gobwas/glob/match/prefix_any.go new file mode 100644 index 0000000000..8ee58fe1b3 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_any.go @@ -0,0 +1,55 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" + + sutil "github.com/gobwas/glob/util/strings" +) + +type PrefixAny struct { + Prefix string + Separators []rune +} + +func NewPrefixAny(s string, sep []rune) PrefixAny { + return PrefixAny{s, sep} +} + +func (self PrefixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Prefix) + if idx == -1 { + return -1, nil + } + + n := len(self.Prefix) + sub := s[idx+n:] + i := sutil.IndexAnyRunes(sub, self.Separators) + if i > -1 { + sub = sub[:i] + } + + seg := acquireSegments(len(sub) + 1) + seg = append(seg, n) + for i, r := range sub { + seg = append(seg, n+i+utf8.RuneLen(r)) + } + + return idx, seg +} + +func (self PrefixAny) Len() int { + return lenNo +} + +func (self PrefixAny) Match(s string) bool { + if !strings.HasPrefix(s, self.Prefix) { + return false + } + return sutil.IndexAnyRunes(s[len(self.Prefix):], self.Separators) == -1 +} + +func (self PrefixAny) String() string { + return fmt.Sprintf("", self.Prefix, string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/prefix_suffix.go b/vendor/github.com/gobwas/glob/match/prefix_suffix.go new file mode 100644 index 0000000000..8208085a19 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/prefix_suffix.go @@ -0,0 +1,62 @@ +package match + +import ( + "fmt" + "strings" +) + +type PrefixSuffix struct { + Prefix, Suffix string +} + +func NewPrefixSuffix(p, s string) 
PrefixSuffix { + return PrefixSuffix{p, s} +} + +func (self PrefixSuffix) Index(s string) (int, []int) { + prefixIdx := strings.Index(s, self.Prefix) + if prefixIdx == -1 { + return -1, nil + } + + suffixLen := len(self.Suffix) + if suffixLen <= 0 { + return prefixIdx, []int{len(s) - prefixIdx} + } + + if (len(s) - prefixIdx) <= 0 { + return -1, nil + } + + segments := acquireSegments(len(s) - prefixIdx) + for sub := s[prefixIdx:]; ; { + suffixIdx := strings.LastIndex(sub, self.Suffix) + if suffixIdx == -1 { + break + } + + segments = append(segments, suffixIdx+suffixLen) + sub = sub[:suffixIdx] + } + + if len(segments) == 0 { + releaseSegments(segments) + return -1, nil + } + + reverseSegments(segments) + + return prefixIdx, segments +} + +func (self PrefixSuffix) Len() int { + return lenNo +} + +func (self PrefixSuffix) Match(s string) bool { + return strings.HasPrefix(s, self.Prefix) && strings.HasSuffix(s, self.Suffix) +} + +func (self PrefixSuffix) String() string { + return fmt.Sprintf("<prefix_suffix:[%s,%s]>", self.Prefix, self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/range.go b/vendor/github.com/gobwas/glob/match/range.go new file mode 100644 index 0000000000..ce30245a40 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/range.go @@ -0,0 +1,48 @@ +package match + +import ( + "fmt" + "unicode/utf8" +) + +type Range struct { + Lo, Hi rune + Not bool +} + +func NewRange(lo, hi rune, not bool) Range { + return Range{lo, hi, not} +} + +func (self Range) Len() int { + return lenOne +} + +func (self Range) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + inRange := r >= self.Lo && r <= self.Hi + + return inRange == !self.Not +} + +func (self Range) Index(s string) (int, []int) { + for i, r := range s { + if self.Not != (r >= self.Lo && r <= self.Hi) { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Range) String() string { + var not string + if self.Not { + not = "!"
+ } + return fmt.Sprintf("<range:%s[%s,%s]>", not, string(self.Lo), string(self.Hi)) +} diff --git a/vendor/github.com/gobwas/glob/match/row.go b/vendor/github.com/gobwas/glob/match/row.go new file mode 100644 index 0000000000..4379042e42 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/row.go @@ -0,0 +1,77 @@ +package match + +import ( + "fmt" +) + +type Row struct { + Matchers Matchers + RunesLength int + Segments []int +} + +func NewRow(len int, m ...Matcher) Row { + return Row{ + Matchers: Matchers(m), + RunesLength: len, + Segments: []int{len}, + } +} + +func (self Row) matchAll(s string) bool { + var idx int + for _, m := range self.Matchers { + length := m.Len() + + var next, i int + for next = range s[idx:] { + i++ + if i == length { + break + } + } + + if i < length || !m.Match(s[idx:idx+next+1]) { + return false + } + + idx += next + 1 + } + + return true +} + +func (self Row) lenOk(s string) bool { + var i int + for range s { + i++ + if i > self.RunesLength { + return false + } + } + return self.RunesLength == i +} + +func (self Row) Match(s string) bool { + return self.lenOk(s) && self.matchAll(s) +} + +func (self Row) Len() (l int) { + return self.RunesLength +} + +func (self Row) Index(s string) (int, []int) { + for i := range s { + if len(s[i:]) < self.RunesLength { + break + } + if self.matchAll(s[i:]) { + return i, self.Segments + } + } + return -1, nil +} + +func (self Row) String() string { + return fmt.Sprintf("<row_%d:[%s]>", self.RunesLength, self.Matchers) +} diff --git a/vendor/github.com/gobwas/glob/match/segments.go b/vendor/github.com/gobwas/glob/match/segments.go new file mode 100644 index 0000000000..9ea6f30943 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/segments.go @@ -0,0 +1,91 @@ +package match + +import ( + "sync" +) + +type SomePool interface { + Get() []int + Put([]int) +} + +var segmentsPools [1024]sync.Pool + +func toPowerOfTwo(v int) int { + v-- + v |= v >> 1 + v |= v >> 2 + v |= v >> 4 + v |= v >> 8 + v |= v >> 16 + v++ + + return v +} + +const ( + cacheFrom = 16 + cacheToAndHigher = 1024 + cacheFromIndex = 15 + cacheToAndHigherIndex = 1023 +) + +var ( + segments0 = []int{0} + segments1 = []int{1} + segments2 = []int{2} + segments3 = []int{3} + segments4 = []int{4} +) + +var segmentsByRuneLength [5][]int = [5][]int{ + 0: segments0, + 1: segments1, + 2: segments2, + 3: segments3, + 4: segments4, +} + +func init() { + for i := cacheToAndHigher; i >= cacheFrom; i >>= 1 { + func(i int) { + segmentsPools[i-1] = sync.Pool{New: func() interface{} { + return make([]int, 0, i) + }} + }(i) + } +} + +func getTableIndex(c int) int { + p := toPowerOfTwo(c) + switch { + case p >= cacheToAndHigher: + return cacheToAndHigherIndex + case p <= cacheFrom: + return cacheFromIndex + default: + return p - 1 + } +} + +func acquireSegments(c int) []int { + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return make([]int, 0, c) + } + + return segmentsPools[getTableIndex(c)].Get().([]int)[:0] +} + +func releaseSegments(s []int) { + c := cap(s) + + // make []int with less capacity than cacheFrom + // is faster than acquiring it from pool + if c < cacheFrom { + return + } + + segmentsPools[getTableIndex(c)].Put(s) +} diff --git a/vendor/github.com/gobwas/glob/match/single.go b/vendor/github.com/gobwas/glob/match/single.go new file mode 100644 index 0000000000..ee6e3954c1 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/single.go @@ -0,0 +1,43 @@ 
+package match + +import ( + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +// Single represents the `?` wildcard +type Single struct { + Separators []rune +} + +func NewSingle(s []rune) Single { + return Single{s} +} + +func (self Single) Match(s string) bool { + r, w := utf8.DecodeRuneInString(s) + if len(s) > w { + return false + } + + return runes.IndexRune(self.Separators, r) == -1 +} + +func (self Single) Len() int { + return lenOne +} + +func (self Single) Index(s string) (int, []int) { + for i, r := range s { + if runes.IndexRune(self.Separators, r) == -1 { + return i, segmentsByRuneLength[utf8.RuneLen(r)] + } + } + + return -1, nil +} + +func (self Single) String() string { + return fmt.Sprintf("<single:![%s]>", string(self.Separators)) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix.go b/vendor/github.com/gobwas/glob/match/suffix.go new file mode 100644 index 0000000000..85bea8c68e --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix.go @@ -0,0 +1,35 @@ +package match + +import ( + "fmt" + "strings" +) + +type Suffix struct { + Suffix string +} + +func NewSuffix(s string) Suffix { + return Suffix{s} +} + +func (self Suffix) Len() int { + return lenNo +} + +func (self Suffix) Match(s string) bool { + return strings.HasSuffix(s, self.Suffix) +} + +func (self Suffix) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + return 0, []int{idx + len(self.Suffix)} +} + +func (self Suffix) String() string { + return fmt.Sprintf("<suffix:%s>", self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/suffix_any.go b/vendor/github.com/gobwas/glob/match/suffix_any.go new file mode 100644 index 0000000000..c5106f8196 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/suffix_any.go @@ -0,0 +1,43 @@ +package match + +import ( + "fmt" + "strings" + + sutil "github.com/gobwas/glob/util/strings" +) + +type SuffixAny struct { + Suffix string + Separators []rune +} + +func NewSuffixAny(s string, sep []rune) SuffixAny { + return SuffixAny{s, sep} +} + +func (self SuffixAny) Index(s string) (int, []int) { + idx := strings.Index(s, self.Suffix) + if idx == -1 { + return -1, nil + } + + i := sutil.LastIndexAnyRunes(s[:idx], self.Separators) + 1 + + return i, []int{idx + len(self.Suffix) - i} +} + +func (self SuffixAny) Len() int { + return lenNo +} + +func (self SuffixAny) Match(s string) bool { + if !strings.HasSuffix(s, self.Suffix) { + return false + } + return sutil.IndexAnyRunes(s[:len(s)-len(self.Suffix)], self.Separators) == -1 +} + +func (self SuffixAny) String() string { + return fmt.Sprintf("<suffix_any:![%s]%s>", string(self.Separators), self.Suffix) +} diff --git a/vendor/github.com/gobwas/glob/match/super.go b/vendor/github.com/gobwas/glob/match/super.go new file mode 100644 index 0000000000..3875950bb8 --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/super.go @@ -0,0 +1,33 @@ +package match + +import ( + "fmt" +) + +type Super struct{} + +func NewSuper() Super { + return Super{} +} + +func (self Super) Match(s string) bool { + return true +} + +func (self Super) Len() int { + return lenNo +} + +func (self Super) Index(s string) (int, []int) { + segments := acquireSegments(len(s) + 1) + for i := range s { + segments = append(segments, i) + } + segments = append(segments, len(s)) + + return 0, segments +} + +func (self Super) String() string { + return fmt.Sprintf("<super>") +} diff --git a/vendor/github.com/gobwas/glob/match/text.go 
b/vendor/github.com/gobwas/glob/match/text.go new file mode 100644 index 0000000000..0a17616d3c --- /dev/null +++ b/vendor/github.com/gobwas/glob/match/text.go @@ -0,0 +1,45 @@ +package match + +import ( + "fmt" + "strings" + "unicode/utf8" +) + +// Text represents a raw string to match +type Text struct { + Str string + RunesLength int + BytesLength int + Segments []int +} + +func NewText(s string) Text { + return Text{ + Str: s, + RunesLength: utf8.RuneCountInString(s), + BytesLength: len(s), + Segments: []int{len(s)}, + } +} + +func (self Text) Match(s string) bool { + return self.Str == s +} + +func (self Text) Len() int { + return self.RunesLength +} + +func (self Text) Index(s string) (int, []int) { + index := strings.Index(s, self.Str) + if index == -1 { + return -1, nil + } + + return index, self.Segments +} + +func (self Text) String() string { + return fmt.Sprintf("<text:`%s`>", self.Str) +} diff --git a/vendor/github.com/gobwas/glob/readme.md b/vendor/github.com/gobwas/glob/readme.md new file mode 100644 index 0000000000..f58144e733 --- /dev/null +++ b/vendor/github.com/gobwas/glob/readme.md @@ -0,0 +1,148 @@ +# glob.[go](https://golang.org) + +[![GoDoc][godoc-image]][godoc-url] [![Build Status][travis-image]][travis-url] + +> Go Globbing Library. + +## Install + +```shell + go get github.com/gobwas/glob +``` + +## Example + +```go + +package main + +import "github.com/gobwas/glob" + +func main() { + var g glob.Glob + + // create simple glob + g = glob.MustCompile("*.github.com") + g.Match("api.github.com") // true + + // quote meta characters and then create simple glob + g = glob.MustCompile(glob.QuoteMeta("*.github.com")) + g.Match("*.github.com") // true + + // create new glob with set of delimiters as ["."] + g = glob.MustCompile("api.*.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // false + + // create new glob with set of delimiters as ["."] + // but now with super wildcard + g = glob.MustCompile("api.**.com", '.') + g.Match("api.github.com") // true + g.Match("api.gi.hub.com") // true + + // create glob with single symbol wildcard + g = glob.MustCompile("?at") + g.Match("cat") // true + g.Match("fat") // true + g.Match("at") // false + + // create glob with single symbol wildcard and delimiters ['f'] + g = glob.MustCompile("?at", 'f') + g.Match("cat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[abc]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-list matchers + g = glob.MustCompile("[!abc]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[a-c]at") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // false + g.Match("at") // false + + // create glob with character-range matchers + g = glob.MustCompile("[!a-c]at") + g.Match("cat") // false + g.Match("bat") // false + g.Match("fat") // true + g.Match("at") // false + + // create glob with pattern-alternatives list + g = glob.MustCompile("{cat,bat,[fr]at}") + g.Match("cat") // true + g.Match("bat") // true + g.Match("fat") // true + g.Match("rat") // true + g.Match("at") // false + g.Match("zat") // false +} + +``` + +## Performance + +This library is created for compile-once patterns. 
This means that compilation can take some time, but +matching strings is then much faster than re-parsing the pattern on every match. + +If you do not reuse the compiled `glob.Glob` object, and instead run `g := glob.MustCompile(pattern); g.Match(...)` every time, your code will be much slower. + +Run `go test -bench=.` from the source root to see the benchmarks: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my cat has very bright eyes` | `true` | 432 +`[a-z][!a-x]*cat*[h][!b]*eyes*` | `my dog has very bright eyes` | `false` | 199 +`https://*.google.*` | `https://account.google.com` | `true` | 96 +`https://*.google.*` | `https://google.com` | `false` | 66 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://yahoo.com` | `true` | 163 +`{https://*.google.*,*yandex.*,*yahoo.*,*mail.ru}` | `http://google.com` | `false` | 197 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `https://safe.gobwas.com` | `true` | 22 +`{https://*gobwas.com,http://exclude.gobwas.com}` | `http://safe.gobwas.com` | `false` | 24 +`abc*` | `abcdef` | `true` | 8.15 +`abc*` | `af` | `false` | 5.68 +`*def` | `abcdef` | `true` | 8.84 +`*def` | `af` | `false` | 5.74 +`ab*ef` | `abcdef` | `true` | 15.2 +`ab*ef` | `af` | `false` | 10.4 + +The same patterns with the `regexp` package: + +Pattern | Fixture | Match | Speed (ns/op) +--------|---------|-------|-------------- +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my cat has very bright eyes` | `true` | 2553 +`^[a-z][^a-x].*cat.*[h][^b].*eyes.*$` | `my dog has very bright eyes` | `false` | 1383 +`^https:\/\/.*\.google\..*$` | `https://account.google.com` | `true` | 1205 +`^https:\/\/.*\.google\..*$` | `https://google.com` | `false` | 767 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://yahoo.com` | `true` | 1435 +`^(https:\/\/.*\.google\..*|.*yandex\..*|.*yahoo\..*|.*mail\.ru)$` | `http://google.com` | `false` | 1674 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `https://safe.gobwas.com` | `true` | 1039 +`^(https:\/\/.*gobwas\.com|http://exclude.gobwas.com)$` | `http://safe.gobwas.com` | `false` | 272 +`^abc.*$` | `abcdef` | `true` | 237 +`^abc.*$` | `af` | `false` | 100 +`^.*def$` | `abcdef` | `true` | 464 +`^.*def$` | `af` | `false` | 265 +`^ab.*ef$` | `abcdef` | `true` | 375 +`^ab.*ef$` | `af` | `false` | 145 + +[godoc-image]: https://godoc.org/github.com/gobwas/glob?status.svg +[godoc-url]: https://godoc.org/github.com/gobwas/glob +[travis-image]: https://travis-ci.org/gobwas/glob.svg?branch=master +[travis-url]: https://travis-ci.org/gobwas/glob + +## Syntax + +The syntax is inspired by [standard wildcards](http://tldp.org/LDP/GNU-Linux-Tools-Summary/html/x11655.htm), +except that `**` (the super-asterisk) is not sensitive to separators.
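To make the compile-once advice above concrete, here is a minimal sketch (the pattern and host names are illustrative): + +```go +package main + +import ( + "fmt" + + "github.com/gobwas/glob" +) + +// Compile the pattern once, at startup, and reuse the result. +var hostGlob = glob.MustCompile("*.google.com", '.') + +func main() { + // Each Match call reuses the compiled matcher; no re-parsing happens here. + for _, host := range []string{"api.google.com", "api.github.com"} { + fmt.Println(host, hostGlob.Match(host)) // true, then false + } +} +```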
\ No newline at end of file diff --git a/vendor/github.com/gobwas/glob/syntax/ast/ast.go b/vendor/github.com/gobwas/glob/syntax/ast/ast.go new file mode 100644 index 0000000000..3220a694a9 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/ast.go @@ -0,0 +1,122 @@ +package ast + +import ( + "bytes" + "fmt" +) + +type Node struct { + Parent *Node + Children []*Node + Value interface{} + Kind Kind +} + +func NewNode(k Kind, v interface{}, ch ...*Node) *Node { + n := &Node{ + Kind: k, + Value: v, + } + for _, c := range ch { + Insert(n, c) + } + return n +} + +func (a *Node) Equal(b *Node) bool { + if a.Kind != b.Kind { + return false + } + if a.Value != b.Value { + return false + } + if len(a.Children) != len(b.Children) { + return false + } + for i, c := range a.Children { + if !c.Equal(b.Children[i]) { + return false + } + } + return true +} + +func (a *Node) String() string { + var buf bytes.Buffer + buf.WriteString(a.Kind.String()) + if a.Value != nil { + buf.WriteString(" =") + buf.WriteString(fmt.Sprintf("%v", a.Value)) + } + if len(a.Children) > 0 { + buf.WriteString(" [") + for i, c := range a.Children { + if i > 0 { + buf.WriteString(", ") + } + buf.WriteString(c.String()) + } + buf.WriteString("]") + } + return buf.String() +} + +func Insert(parent *Node, children ...*Node) { + parent.Children = append(parent.Children, children...) + for _, ch := range children { + ch.Parent = parent + } +} + +type List struct { + Not bool + Chars string +} + +type Range struct { + Not bool + Lo, Hi rune +} + +type Text struct { + Text string +} + +type Kind int + +const ( + KindNothing Kind = iota + KindPattern + KindList + KindRange + KindText + KindAny + KindSuper + KindSingle + KindAnyOf +) + +func (k Kind) String() string { + switch k { + case KindNothing: + return "Nothing" + case KindPattern: + return "Pattern" + case KindList: + return "List" + case KindRange: + return "Range" + case KindText: + return "Text" + case KindAny: + return "Any" + case KindSuper: + return "Super" + case KindSingle: + return "Single" + case KindAnyOf: + return "AnyOf" + default: + return "" + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/ast/parser.go b/vendor/github.com/gobwas/glob/syntax/ast/parser.go new file mode 100644 index 0000000000..429b409430 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/ast/parser.go @@ -0,0 +1,157 @@ +package ast + +import ( + "errors" + "fmt" + "github.com/gobwas/glob/syntax/lexer" + "unicode/utf8" +) + +type Lexer interface { + Next() lexer.Token +} + +type parseFn func(*Node, Lexer) (parseFn, *Node, error) + +func Parse(lexer Lexer) (*Node, error) { + var parser parseFn + + root := NewNode(KindPattern, nil) + + var ( + tree *Node + err error + ) + for parser, tree = parserMain, root; parser != nil; { + parser, tree, err = parser(tree, lexer) + if err != nil { + return nil, err + } + } + + return root, nil +} + +func parserMain(tree *Node, lex Lexer) (parseFn, *Node, error) { + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, nil + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Text: + Insert(tree, NewNode(KindText, Text{token.Raw})) + return parserMain, tree, nil + + case lexer.Any: + Insert(tree, NewNode(KindAny, nil)) + return parserMain, tree, nil + + case lexer.Super: + Insert(tree, NewNode(KindSuper, nil)) + return parserMain, tree, nil + + case lexer.Single: + Insert(tree, NewNode(KindSingle, nil)) + return parserMain, tree, 
nil + + case lexer.RangeOpen: + return parserRange, tree, nil + + case lexer.TermsOpen: + a := NewNode(KindAnyOf, nil) + Insert(tree, a) + + p := NewNode(KindPattern, nil) + Insert(a, p) + + return parserMain, p, nil + + case lexer.Separator: + p := NewNode(KindPattern, nil) + Insert(tree.Parent, p) + + return parserMain, p, nil + + case lexer.TermsClose: + return parserMain, tree.Parent.Parent, nil + + default: + return nil, tree, fmt.Errorf("unexpected token: %s", token) + } + } + return nil, tree, fmt.Errorf("unknown error") +} + +func parserRange(tree *Node, lex Lexer) (parseFn, *Node, error) { + var ( + not bool + lo rune + hi rune + chars string + ) + for { + token := lex.Next() + switch token.Type { + case lexer.EOF: + return nil, tree, errors.New("unexpected end") + + case lexer.Error: + return nil, tree, errors.New(token.Raw) + + case lexer.Not: + not = true + + case lexer.RangeLo: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + lo = r + + case lexer.RangeBetween: + // + + case lexer.RangeHi: + r, w := utf8.DecodeRuneInString(token.Raw) + if len(token.Raw) > w { + return nil, tree, fmt.Errorf("unexpected length of lo character") + } + + hi = r + + if hi < lo { + return nil, tree, fmt.Errorf("hi character '%s' should be greater than lo '%s'", string(hi), string(lo)) + } + + case lexer.Text: + chars = token.Raw + + case lexer.RangeClose: + isRange := lo != 0 && hi != 0 + isChars := chars != "" + + if isChars == isRange { + return nil, tree, fmt.Errorf("could not parse range") + } + + if isRange { + Insert(tree, NewNode(KindRange, Range{ + Lo: lo, + Hi: hi, + Not: not, + })) + } else { + Insert(tree, NewNode(KindList, List{ + Chars: chars, + Not: not, + })) + } + + return parserMain, tree, nil + } + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go new file mode 100644 index 0000000000..a1c8d1962a --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/lexer.go @@ -0,0 +1,273 @@ +package lexer + +import ( + "bytes" + "fmt" + "github.com/gobwas/glob/util/runes" + "unicode/utf8" +) + +const ( + char_any = '*' + char_comma = ',' + char_single = '?' + char_escape = '\\' + char_range_open = '[' + char_range_close = ']' + char_terms_open = '{' + char_terms_close = '}' + char_range_not = '!' 
+ char_range_between = '-' +) + +var specials = []byte{ + char_any, + char_single, + char_escape, + char_range_open, + char_range_close, + char_terms_open, + char_terms_close, +} + +func Special(c byte) bool { + return bytes.IndexByte(specials, c) != -1 +} + +type tokens []Token + +func (i *tokens) shift() (ret Token) { + ret = (*i)[0] + copy(*i, (*i)[1:]) + *i = (*i)[:len(*i)-1] + return +} + +func (i *tokens) push(v Token) { + *i = append(*i, v) +} + +func (i *tokens) empty() bool { + return len(*i) == 0 +} + +var eof rune = 0 + +type lexer struct { + data string + pos int + err error + + tokens tokens + termsLevel int + + lastRune rune + lastRuneSize int + hasRune bool +} + +func NewLexer(source string) *lexer { + l := &lexer{ + data: source, + tokens: tokens(make([]Token, 0, 4)), + } + return l +} + +func (l *lexer) Next() Token { + if l.err != nil { + return Token{Error, l.err.Error()} + } + if !l.tokens.empty() { + return l.tokens.shift() + } + + l.fetchItem() + return l.Next() +} + +func (l *lexer) peek() (r rune, w int) { + if l.pos == len(l.data) { + return eof, 0 + } + + r, w = utf8.DecodeRuneInString(l.data[l.pos:]) + if r == utf8.RuneError { + l.errorf("could not read rune") + r = eof + w = 0 + } + + return +} + +func (l *lexer) read() rune { + if l.hasRune { + l.hasRune = false + l.seek(l.lastRuneSize) + return l.lastRune + } + + r, s := l.peek() + l.seek(s) + + l.lastRune = r + l.lastRuneSize = s + + return r +} + +func (l *lexer) seek(w int) { + l.pos += w +} + +func (l *lexer) unread() { + if l.hasRune { + l.errorf("could not unread rune") + return + } + l.seek(-l.lastRuneSize) + l.hasRune = true +} + +func (l *lexer) errorf(f string, v ...interface{}) { + l.err = fmt.Errorf(f, v...) +} + +func (l *lexer) inTerms() bool { + return l.termsLevel > 0 +} + +func (l *lexer) termsEnter() { + l.termsLevel++ +} + +func (l *lexer) termsLeave() { + l.termsLevel-- +} + +var inTextBreakers = []rune{char_single, char_any, char_range_open, char_terms_open} +var inTermsBreakers = append(inTextBreakers, char_terms_close, char_comma) + +func (l *lexer) fetchItem() { + r := l.read() + switch { + case r == eof: + l.tokens.push(Token{EOF, ""}) + + case r == char_terms_open: + l.termsEnter() + l.tokens.push(Token{TermsOpen, string(r)}) + + case r == char_comma && l.inTerms(): + l.tokens.push(Token{Separator, string(r)}) + + case r == char_terms_close && l.inTerms(): + l.tokens.push(Token{TermsClose, string(r)}) + l.termsLeave() + + case r == char_range_open: + l.tokens.push(Token{RangeOpen, string(r)}) + l.fetchRange() + + case r == char_single: + l.tokens.push(Token{Single, string(r)}) + + case r == char_any: + if l.read() == char_any { + l.tokens.push(Token{Super, string(r) + string(r)}) + } else { + l.unread() + l.tokens.push(Token{Any, string(r)}) + } + + default: + l.unread() + + var breakers []rune + if l.inTerms() { + breakers = inTermsBreakers + } else { + breakers = inTextBreakers + } + l.fetchText(breakers) + } +} + +func (l *lexer) fetchRange() { + var wantHi bool + var wantClose bool + var seenNot bool + for { + r := l.read() + if r == eof { + l.errorf("unexpected end of input") + return + } + + if wantClose { + if r != char_range_close { + l.errorf("expected close range character") + } else { + l.tokens.push(Token{RangeClose, string(r)}) + } + return + } + + if wantHi { + l.tokens.push(Token{RangeHi, string(r)}) + wantClose = true + continue + } + + if !seenNot && r == char_range_not { + l.tokens.push(Token{Not, string(r)}) + seenNot = true + continue + } + + if n, w := l.peek(); n 
== char_range_between { + l.seek(w) + l.tokens.push(Token{RangeLo, string(r)}) + l.tokens.push(Token{RangeBetween, string(n)}) + wantHi = true + continue + } + + l.unread() // unread first peek and fetch as text + l.fetchText([]rune{char_range_close}) + wantClose = true + } +} + +func (l *lexer) fetchText(breakers []rune) { + var data []rune + var escaped bool + +reading: + for { + r := l.read() + if r == eof { + break + } + + if !escaped { + if r == char_escape { + escaped = true + continue + } + + if runes.IndexRune(breakers, r) != -1 { + l.unread() + break reading + } + } + + escaped = false + data = append(data, r) + } + + if len(data) > 0 { + l.tokens.push(Token{Text, string(data)}) + } +} diff --git a/vendor/github.com/gobwas/glob/syntax/lexer/token.go b/vendor/github.com/gobwas/glob/syntax/lexer/token.go new file mode 100644 index 0000000000..2797c4e83a --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/lexer/token.go @@ -0,0 +1,88 @@ +package lexer + +import "fmt" + +type TokenType int + +const ( + EOF TokenType = iota + Error + Text + Char + Any + Super + Single + Not + Separator + RangeOpen + RangeClose + RangeLo + RangeHi + RangeBetween + TermsOpen + TermsClose +) + +func (tt TokenType) String() string { + switch tt { + case EOF: + return "eof" + + case Error: + return "error" + + case Text: + return "text" + + case Char: + return "char" + + case Any: + return "any" + + case Super: + return "super" + + case Single: + return "single" + + case Not: + return "not" + + case Separator: + return "separator" + + case RangeOpen: + return "range_open" + + case RangeClose: + return "range_close" + + case RangeLo: + return "range_lo" + + case RangeHi: + return "range_hi" + + case RangeBetween: + return "range_between" + + case TermsOpen: + return "terms_open" + + case TermsClose: + return "terms_close" + + default: + return "undef" + } +} + +type Token struct { + Type TokenType + Raw string +} + +func (t Token) String() string { + return fmt.Sprintf("%v<%q>", t.Type, t.Raw) +} diff --git a/vendor/github.com/gobwas/glob/syntax/syntax.go b/vendor/github.com/gobwas/glob/syntax/syntax.go new file mode 100644 index 0000000000..1d168b1482 --- /dev/null +++ b/vendor/github.com/gobwas/glob/syntax/syntax.go @@ -0,0 +1,14 @@ +package syntax + +import ( + "github.com/gobwas/glob/syntax/ast" + "github.com/gobwas/glob/syntax/lexer" +) + +func Parse(s string) (*ast.Node, error) { + return ast.Parse(lexer.NewLexer(s)) +} + +func Special(b byte) bool { + return lexer.Special(b) +} diff --git a/vendor/github.com/gobwas/glob/util/runes/runes.go b/vendor/github.com/gobwas/glob/util/runes/runes.go new file mode 100644 index 0000000000..a723556410 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/runes/runes.go @@ -0,0 +1,154 @@ +package runes + +func Index(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + return 0 + case ln == 1: + return IndexRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + return -1 + case ln > ls: + return -1 + } + +head: + for i := 0; i < ls && ls-i >= ln; i++ { + for y := 0; y < ln; y++ { + if s[i+y] != needle[y] { + continue head + } + } + + return i + } + + return -1 +} + +func LastIndex(s, needle []rune) int { + ls, ln := len(s), len(needle) + + switch { + case ln == 0: + if ls == 0 { + return 0 + } + return ls + case ln == 1: + return IndexLastRune(s, needle[0]) + case ln == ls: + if Equal(s, needle) { + return 0 + } + 
return -1 + case ln > ls: + return -1 + } + +head: + for i := ls - 1; i >= 0 && i >= ln; i-- { + for y := ln - 1; y >= 0; y-- { + if s[i-(ln-y-1)] != needle[y] { + continue head + } + } + + return i - ln + 1 + } + + return -1 +} + +// IndexAny returns the index of the first instance of any Unicode code point +// from chars in s, or -1 if no Unicode code point from chars is present in s. +func IndexAny(s, chars []rune) int { + if len(chars) > 0 { + for i, c := range s { + for _, m := range chars { + if c == m { + return i + } + } + } + } + return -1 +} + +func Contains(s, needle []rune) bool { + return Index(s, needle) >= 0 +} + +func Max(s []rune) (max rune) { + for _, r := range s { + if r > max { + max = r + } + } + + return +} + +func Min(s []rune) rune { + min := rune(-1) + for _, r := range s { + if min == -1 { + min = r + continue + } + + if r < min { + min = r + } + } + + return min +} + +func IndexRune(s []rune, r rune) int { + for i, c := range s { + if c == r { + return i + } + } + return -1 +} + +func IndexLastRune(s []rune, r rune) int { + for i := len(s) - 1; i >= 0; i-- { + if s[i] == r { + return i + } + } + + return -1 +} + +func Equal(a, b []rune) bool { + if len(a) == len(b) { + for i := 0; i < len(a); i++ { + if a[i] != b[i] { + return false + } + } + + return true + } + + return false +} + +// HasPrefix tests whether the string s begins with prefix. +func HasPrefix(s, prefix []rune) bool { + return len(s) >= len(prefix) && Equal(s[0:len(prefix)], prefix) +} + +// HasSuffix tests whether the string s ends with suffix. +func HasSuffix(s, suffix []rune) bool { + return len(s) >= len(suffix) && Equal(s[len(s)-len(suffix):], suffix) +} diff --git a/vendor/github.com/gobwas/glob/util/strings/strings.go b/vendor/github.com/gobwas/glob/util/strings/strings.go new file mode 100644 index 0000000000..e8ee1920b1 --- /dev/null +++ b/vendor/github.com/gobwas/glob/util/strings/strings.go @@ -0,0 +1,39 @@ +package strings + +import ( + "strings" + "unicode/utf8" +) + +func IndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + if i := strings.IndexRune(s, r); i != -1 { + return i + } + } + + return -1 +} + +func LastIndexAnyRunes(s string, rs []rune) int { + for _, r := range rs { + i := -1 + if 0 <= r && r < utf8.RuneSelf { + i = strings.LastIndexByte(s, byte(r)) + } else { + sub := s + for len(sub) > 0 { + j := strings.IndexRune(s, r) + if j == -1 { + break + } + i = j + sub = sub[i+1:] + } + } + if i != -1 { + return i + } + } + return -1 +} diff --git a/vendor/github.com/gorilla/mux/AUTHORS b/vendor/github.com/gorilla/mux/AUTHORS new file mode 100644 index 0000000000..b722392ee5 --- /dev/null +++ b/vendor/github.com/gorilla/mux/AUTHORS @@ -0,0 +1,8 @@ +# This is the official list of gorilla/mux authors for copyright purposes. +# +# Please keep the list sorted. + +Google LLC (https://opensource.google.com/) +Kamil Kisielk +Matt Silverlock +Rodrigo Moraes (https://github.com/moraes) diff --git a/vendor/github.com/gorilla/mux/LICENSE b/vendor/github.com/gorilla/mux/LICENSE new file mode 100644 index 0000000000..6903df6386 --- /dev/null +++ b/vendor/github.com/gorilla/mux/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md new file mode 100644 index 0000000000..35eea9f106 --- /dev/null +++ b/vendor/github.com/gorilla/mux/README.md @@ -0,0 +1,805 @@ +# gorilla/mux + +[![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) +[![CircleCI](https://circleci.com/gh/gorilla/mux.svg?style=svg)](https://circleci.com/gh/gorilla/mux) +[![Sourcegraph](https://sourcegraph.com/github.com/gorilla/mux/-/badge.svg)](https://sourcegraph.com/github.com/gorilla/mux?badge) + +![Gorilla Logo](https://cloud-cdn.questionable.services/gorilla-icon-64.png) + +https://www.gorillatoolkit.org/pkg/mux + +Package `gorilla/mux` implements a request router and dispatcher for matching incoming requests to +their respective handler. + +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: + +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts, paths and query values can have variables with an optional regular expression. +* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. + +--- + +* [Install](#install) +* [Examples](#examples) +* [Matching Routes](#matching-routes) +* [Static Files](#static-files) +* [Serving Single Page Applications](#serving-single-page-applications) (e.g. React, Vue, Ember.js, etc.) 
+* [Registered URLs](#registered-urls) +* [Walking Routes](#walking-routes) +* [Graceful Shutdown](#graceful-shutdown) +* [Middleware](#middleware) +* [Handling CORS Requests](#handling-cors-requests) +* [Testing Handlers](#testing-handlers) +* [Full Example](#full-example) + +--- + +## Install + +With a [correctly configured](https://golang.org/doc/install#testing) Go toolchain: + +```sh +go get -u github.com/gorilla/mux +``` + +## Examples + +Let's start registering a couple of URL paths and handlers: + +```go +func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` + +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. + +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: + +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: + +```go +func ArticlesCategoryHandler(w http.ResponseWriter, r *http.Request) { + vars := mux.Vars(r) + w.WriteHeader(http.StatusOK) + fmt.Fprintf(w, "Category: %v\n", vars["category"]) +} +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. + +### Matching Routes + +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.example.com") +``` + +There are several other matchers that can be added. To match path prefixes: + +```go +r.PathPrefix("/products/") +``` + +...or HTTP methods: + +```go +r.Methods("GET", "POST") +``` + +...or URL schemes: + +```go +r.Schemes("https") +``` + +...or header values: + +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` + +...or query values: + +```go +r.Queries("key", "value") +``` + +...or to use a custom matcher function: + +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` + +...and finally, it is possible to combine several matchers in a single route: + +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") +``` + +Routes are tested in the order they were added to the router. If two routes match, the first one wins: + +```go +r := mux.NewRouter() +r.HandleFunc("/specific", specificHandler) +r.PathPrefix("/").Handler(catchAllHandler) +``` + +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". + +For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: + +```go +r := mux.NewRouter() +s := r.Host("www.example.com").Subrouter() +``` + +Then register routes in the subrouter: + +```go +s.HandleFunc("/products/", ProductsHandler) +s.HandleFunc("/products/{key}", ProductHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` + +The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. + +Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. + +There's one more thing about subroutes. When a subrouter has a path prefix, the inner routes use it as base for their paths: + +```go +r := mux.NewRouter() +s := r.PathPrefix("/products").Subrouter() +// "/products/" +s.HandleFunc("/", ProductsHandler) +// "/products/{key}/" +s.HandleFunc("/{key}/", ProductHandler) +// "/products/{key}/details" +s.HandleFunc("/{key}/details", ProductDetailsHandler) +``` + + +### Static Files + +Note that the path provided to `PathPrefix()` represents a "wildcard": calling +`PathPrefix("/static/").Handler(...)` means that the handler will be passed any +request that matches "/static/\*". This makes it easy to serve static files with mux: + +```go +func main() { + var dir string + + flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir") + flag.Parse() + r := mux.NewRouter() + + // This will serve files under http://localhost:8000/static/ + r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir)))) + + srv := &http.Server{ + Handler: r, + Addr: "127.0.0.1:8000", + // Good practice: enforce timeouts for servers you create! + WriteTimeout: 15 * time.Second, + ReadTimeout: 15 * time.Second, + } + + log.Fatal(srv.ListenAndServe()) +} +``` + +### Serving Single Page Applications + +Most of the time it makes sense to serve your SPA on a separate web server from your API, +but sometimes it's desirable to serve them both from one place. It's possible to write a simple +handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage +mux's powerful routing for your API endpoints. + +```go +package main + +import ( + "encoding/json" + "log" + "net/http" + "os" + "path/filepath" + "time" + + "github.com/gorilla/mux" +) + +// spaHandler implements the http.Handler interface, so we can use it +// to respond to HTTP requests. The path to the static directory and +// path to the index file within that static directory are used to +// serve the SPA in the given static directory. +type spaHandler struct { + staticPath string + indexPath string +} + +// ServeHTTP inspects the URL path to locate a file within the static dir +// on the SPA handler. If a file is found, it will be served. If not, the +// file located at the index path on the SPA handler will be served. This +// is suitable behavior for serving an SPA (single page application). 
+
+
+### Static Files
+
+Note that the path provided to `PathPrefix()` represents a "wildcard": calling
+`PathPrefix("/static/").Handler(...)` means that the handler will be passed any
+request that matches "/static/\*". This makes it easy to serve static files with mux:
+
+```go
+func main() {
+	var dir string
+
+	flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+	flag.Parse()
+	r := mux.NewRouter()
+
+	// This will serve files under http://localhost:8000/static/
+	r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+	srv := &http.Server{
+		Handler: r,
+		Addr:    "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Serving Single Page Applications
+
+Most of the time it makes sense to serve your SPA on a separate web server from your API,
+but sometimes it's desirable to serve them both from one place. It's possible to write a simple
+handler for serving your SPA (for use with React Router's [BrowserRouter](https://reacttraining.com/react-router/web/api/BrowserRouter) for example), and leverage
+mux's powerful routing for your API endpoints.
+
+```go
+package main
+
+import (
+	"encoding/json"
+	"log"
+	"net/http"
+	"os"
+	"path/filepath"
+	"time"
+
+	"github.com/gorilla/mux"
+)
+
+// spaHandler implements the http.Handler interface, so we can use it
+// to respond to HTTP requests. The path to the static directory and
+// path to the index file within that static directory are used to
+// serve the SPA in the given static directory.
+type spaHandler struct {
+	staticPath string
+	indexPath  string
+}
+
+// ServeHTTP inspects the URL path to locate a file within the static dir
+// on the SPA handler. If a file is found, it will be served. If not, the
+// file located at the index path on the SPA handler will be served. This
+// is suitable behavior for serving an SPA (single page application).
+func (h spaHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	// get the absolute path to prevent directory traversal
+	path, err := filepath.Abs(r.URL.Path)
+	if err != nil {
+		// if we failed to get the absolute path respond with a 400 bad request
+		// and stop
+		http.Error(w, err.Error(), http.StatusBadRequest)
+		return
+	}
+
+	// prepend the path with the path to the static directory
+	path = filepath.Join(h.staticPath, path)
+
+	// check whether a file exists at the given path
+	_, err = os.Stat(path)
+	if os.IsNotExist(err) {
+		// file does not exist, serve index.html
+		http.ServeFile(w, r, filepath.Join(h.staticPath, h.indexPath))
+		return
+	} else if err != nil {
+		// if we got an error (that wasn't that the file doesn't exist) stat-ing the
+		// file, return a 500 internal server error and stop
+		http.Error(w, err.Error(), http.StatusInternalServerError)
+		return
+	}
+
+	// otherwise, use http.FileServer to serve the static dir
+	http.FileServer(http.Dir(h.staticPath)).ServeHTTP(w, r)
+}
+
+func main() {
+	router := mux.NewRouter()
+
+	router.HandleFunc("/api/health", func(w http.ResponseWriter, r *http.Request) {
+		// an example API handler
+		json.NewEncoder(w).Encode(map[string]bool{"ok": true})
+	})
+
+	spa := spaHandler{staticPath: "build", indexPath: "index.html"}
+	router.PathPrefix("/").Handler(spa)
+
+	srv := &http.Server{
+		Handler: router,
+		Addr:    "127.0.0.1:8000",
+		// Good practice: enforce timeouts for servers you create!
+		WriteTimeout: 15 * time.Second,
+		ReadTimeout:  15 * time.Second,
+	}
+
+	log.Fatal(srv.ListenAndServe())
+}
+```
+
+### Registered URLs
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+	Name("article")
+```
+
+To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do:
+
+```go
+url, err := r.Get("article").URL("category", "technology", "id", "42")
+```
+
+...and the result will be a `url.URL` with the following path:
+
+```
+"/articles/technology/42"
+```
+
+This also works for host and query value variables:
+
+```go
+r := mux.NewRouter()
+r.Host("{subdomain}.example.com").
+	Path("/articles/{category}/{id:[0-9]+}").
+	Queries("filter", "{filter}").
+	HandlerFunc(ArticleHandler).
+	Name("article")
+
+// url.String() will be "http://news.example.com/articles/technology/42?filter=gorilla"
+url, err := r.Get("article").URL("subdomain", "news",
+	"category", "technology",
+	"id", "42",
+	"filter", "gorilla")
+```
+
+All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match.
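+
+A minimal sketch of such a build-only route, using the `BuildOnly()` route option (the host, path, and route name here are invented for illustration):
+
+```go
+r := mux.NewRouter()
+// This route is never matched against incoming requests; it exists only
+// so that URLs pointing at another host can be built from one place.
+r.Host("files.example.com").
+	Path("/downloads/{name}").
+	BuildOnly().
+	Name("download")
+
+// u.String() will be "http://files.example.com/downloads/report.pdf"
+u, err := r.Get("download").URL("name", "report.pdf")
+```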
+
+Regex support also exists for matching Headers within a route. For example, we could do:
+
+```go
+r.HeadersRegexp("Content-Type", "application/(text|json)")
+```
+
+...and the route will match both requests with a Content-Type of `application/json` as well as `application/text`.
+
+There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. For the previous route, we would do:
+
+```go
+// "http://news.example.com/"
+host, err := r.Get("article").URLHost("subdomain", "news")
+
+// "/articles/technology/42"
+path, err := r.Get("article").URLPath("category", "technology", "id", "42")
+```
+
+And if you use subrouters, host and path defined separately can be built as well:
+
+```go
+r := mux.NewRouter()
+s := r.Host("{subdomain}.example.com").Subrouter()
+s.Path("/articles/{category}/{id:[0-9]+}").
+	HandlerFunc(ArticleHandler).
+	Name("article")
+
+// "http://news.example.com/articles/technology/42"
+url, err := r.Get("article").URL("subdomain", "news",
+	"category", "technology",
+	"id", "42")
+```
+
+### Walking Routes
+
+The `Walk` function on `mux.Router` can be used to visit all of the routes that are registered on a router. For example,
+the following prints all of the registered routes:
+
+```go
+package main
+
+import (
+	"fmt"
+	"net/http"
+	"strings"
+
+	"github.com/gorilla/mux"
+)
+
+func handler(w http.ResponseWriter, r *http.Request) {
+	return
+}
+
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/", handler)
+	r.HandleFunc("/products", handler).Methods("POST")
+	r.HandleFunc("/articles", handler).Methods("GET")
+	r.HandleFunc("/articles/{id}", handler).Methods("GET", "PUT")
+	r.HandleFunc("/authors", handler).Queries("surname", "{surname}")
+	err := r.Walk(func(route *mux.Route, router *mux.Router, ancestors []*mux.Route) error {
+		pathTemplate, err := route.GetPathTemplate()
+		if err == nil {
+			fmt.Println("ROUTE:", pathTemplate)
+		}
+		pathRegexp, err := route.GetPathRegexp()
+		if err == nil {
+			fmt.Println("Path regexp:", pathRegexp)
+		}
+		queriesTemplates, err := route.GetQueriesTemplates()
+		if err == nil {
+			fmt.Println("Queries templates:", strings.Join(queriesTemplates, ","))
+		}
+		queriesRegexps, err := route.GetQueriesRegexp()
+		if err == nil {
+			fmt.Println("Queries regexps:", strings.Join(queriesRegexps, ","))
+		}
+		methods, err := route.GetMethods()
+		if err == nil {
+			fmt.Println("Methods:", strings.Join(methods, ","))
+		}
+		fmt.Println()
+		return nil
+	})
+
+	if err != nil {
+		fmt.Println(err)
+	}
+
+	http.Handle("/", r)
+}
+```
+
+### Graceful Shutdown
+
+Go 1.8 introduced the ability to [gracefully shutdown](https://golang.org/doc/go1.8#http_shutdown) a `*http.Server`. Here's how to do that alongside `mux`:
+
+```go
+package main
+
+import (
+	"context"
+	"flag"
+	"log"
+	"net/http"
+	"os"
+	"os/signal"
+	"time"
+
+	"github.com/gorilla/mux"
+)
+
+func main() {
+	var wait time.Duration
+	flag.DurationVar(&wait, "graceful-timeout", time.Second * 15, "the duration for which the server gracefully waits for existing connections to finish - e.g. 15s or 1m")
+	flag.Parse()
+
+	r := mux.NewRouter()
+	// Add your routes as needed
+
+	srv := &http.Server{
+		Addr: "0.0.0.0:8080",
+		// Good practice to set timeouts to avoid Slowloris attacks.
+		WriteTimeout: time.Second * 15,
+		ReadTimeout:  time.Second * 15,
+		IdleTimeout:  time.Second * 60,
+		Handler: r, // Pass our instance of gorilla/mux in.
+	}
+
+	// Run our server in a goroutine so that it doesn't block.
+	go func() {
+		if err := srv.ListenAndServe(); err != nil {
+			log.Println(err)
+		}
+	}()
+
+	c := make(chan os.Signal, 1)
+	// We'll accept graceful shutdowns when quit via SIGINT (Ctrl+C);
+	// SIGKILL, SIGQUIT (Ctrl+\) or SIGTERM will not be caught.
+	signal.Notify(c, os.Interrupt)
+
+	// Block until we receive our signal.
+	<-c
+
+	// Create a deadline to wait for.
+	ctx, cancel := context.WithTimeout(context.Background(), wait)
+	defer cancel()
+	// Doesn't block if no connections, but will otherwise wait
+	// until the timeout deadline.
+	srv.Shutdown(ctx)
+	// Optionally, you could run srv.Shutdown in a goroutine and block on
+	// <-ctx.Done() if your application should wait for other services
+	// to finalize based on context cancellation.
+	log.Println("shutting down")
+	os.Exit(0)
+}
+```
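+
+The comment above mentions an alternative pattern: run `srv.Shutdown` in a goroutine and block on the context instead. A minimal sketch of that variant, reusing the `srv` and `wait` variables from the example (this fragment would stand in for the tail of `main()` above; it is an illustration, not part of the upstream README):
+
+```go
+ctx, cancel := context.WithTimeout(context.Background(), wait)
+defer cancel()
+
+// Run Shutdown in its own goroutine so main() can block on the context.
+go func() {
+	if err := srv.Shutdown(ctx); err != nil {
+		log.Println(err)
+	}
+}()
+
+// Block until the deadline expires. This gives other services tied to the
+// same context the full timeout window to finalize their own teardown.
+<-ctx.Done()
+log.Println("shutting down")
+```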
+
+### Middleware
+
+Mux supports the addition of middlewares to a [Router](https://godoc.org/github.com/gorilla/mux#Router), which are executed in the order they are added if a match is found, including its subrouters.
+Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or `ResponseWriter` hijacking.
+
+Mux middlewares are defined using the de facto standard type:
+
+```go
+type MiddlewareFunc func(http.Handler) http.Handler
+```
+
+Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc. This takes advantage of closures being able to access variables from the context where they are created, while retaining the signature enforced by the receivers.
+
+A very basic middleware which logs the URI of the request being handled could be written as:
+
+```go
+func loggingMiddleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		// Do stuff here
+		log.Println(r.RequestURI)
+		// Call the next handler, which can be another middleware in the chain, or the final handler.
+		next.ServeHTTP(w, r)
+	})
+}
+```
+
+Middlewares can be added to a router using `Router.Use()`:
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+r.Use(loggingMiddleware)
+```
+
+A more complex authentication middleware, which maps session token to users, could be written as:
+
+```go
+// Define our struct
+type authenticationMiddleware struct {
+	tokenUsers map[string]string
+}
+
+// Initialize it somewhere
+func (amw *authenticationMiddleware) Populate() {
+	amw.tokenUsers["00000000"] = "user0"
+	amw.tokenUsers["aaaaaaaa"] = "userA"
+	amw.tokenUsers["05f717e5"] = "randomUser"
+	amw.tokenUsers["deadbeef"] = "user0"
+}
+
+// Middleware function, which will be called for each request
+func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		token := r.Header.Get("X-Session-Token")
+
+		if user, found := amw.tokenUsers[token]; found {
+			// We found the token in our map
+			log.Printf("Authenticated user %s\n", user)
+			// Pass down the request to the next middleware (or final handler)
+			next.ServeHTTP(w, r)
+		} else {
+			// Write an error and stop the handler chain
+			http.Error(w, "Forbidden", http.StatusForbidden)
+		}
+	})
+}
+```
+
+```go
+r := mux.NewRouter()
+r.HandleFunc("/", handler)
+
+// The map must be initialized before Populate() writes to it;
+// otherwise the assignments above would panic on a nil map.
+amw := authenticationMiddleware{tokenUsers: make(map[string]string)}
+amw.Populate()
+
+r.Use(amw.Middleware)
+```
+
+Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. Middlewares _should_ write to `ResponseWriter` if they _are_ going to terminate the request, and they _should not_ write to `ResponseWriter` if they _are not_ going to terminate it.
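+
+Because `Use()` is also available on subrouters, middleware can be scoped to just part of an application. A minimal sketch, reusing `loggingMiddleware` and `amw` from the examples above (`handler` is any placeholder handler):
+
+```go
+r := mux.NewRouter()
+r.Use(loggingMiddleware) // runs for every route matched on r
+
+api := r.PathPrefix("/api").Subrouter()
+api.Use(amw.Middleware) // runs only for routes registered on the /api subrouter
+api.HandleFunc("/health", handler)
+```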
+
+### Handling CORS Requests
+
+[CORSMethodMiddleware](https://godoc.org/github.com/gorilla/mux#CORSMethodMiddleware) intends to make it easier to strictly set the `Access-Control-Allow-Methods` response header.
+
+* You will still need to use your own CORS handler to set the other CORS headers such as `Access-Control-Allow-Origin`
+* The middleware will set the `Access-Control-Allow-Methods` header to all the method matchers (e.g. `r.Methods(http.MethodGet, http.MethodPut, http.MethodOptions)` -> `Access-Control-Allow-Methods: GET,PUT,OPTIONS`) on a route
+* If you do not specify any methods, then the header will not be set
+> _Important_: there must be an `OPTIONS` method matcher for the middleware to set the headers.
+
+Here is an example of using `CORSMethodMiddleware` along with a custom `OPTIONS` handler to set all the required CORS headers:
+
+```go
+package main
+
+import (
+	"net/http"
+
+	"github.com/gorilla/mux"
+)
+
+func main() {
+	r := mux.NewRouter()
+
+	// IMPORTANT: you must specify an OPTIONS method matcher for the middleware to set CORS headers
+	r.HandleFunc("/foo", fooHandler).Methods(http.MethodGet, http.MethodPut, http.MethodPatch, http.MethodOptions)
+	r.Use(mux.CORSMethodMiddleware(r))
+
+	http.ListenAndServe(":8080", r)
+}
+
+func fooHandler(w http.ResponseWriter, r *http.Request) {
+	w.Header().Set("Access-Control-Allow-Origin", "*")
+	if r.Method == http.MethodOptions {
+		return
+	}
+
+	w.Write([]byte("foo"))
+}
+```
+
+A request to `/foo` using something like:
+
+```bash
+curl localhost:8080/foo -v
+```
+
+Would look like:
+
+```bash
+*   Trying ::1...
+* TCP_NODELAY set
+* Connected to localhost (::1) port 8080 (#0)
+> GET /foo HTTP/1.1
+> Host: localhost:8080
+> User-Agent: curl/7.59.0
+> Accept: */*
+>
+< HTTP/1.1 200 OK
+< Access-Control-Allow-Methods: GET,PUT,PATCH,OPTIONS
+< Access-Control-Allow-Origin: *
+< Date: Fri, 28 Jun 2019 20:13:30 GMT
+< Content-Length: 3
+< Content-Type: text/plain; charset=utf-8
+<
+* Connection #0 to host localhost left intact
+foo
+```
+
+### Testing Handlers
+
+Testing handlers in a Go web application is straightforward, and _mux_ doesn't complicate this any further. Given two files: `endpoints.go` and `endpoints_test.go`, here's how we'd test an application using _mux_.
+
+First, our simple HTTP handler:
+
+```go
+// endpoints.go
+package main
+
+import (
+	"io"
+	"log"
+	"net/http"
+
+	"github.com/gorilla/mux"
+)
+
+func HealthCheckHandler(w http.ResponseWriter, r *http.Request) {
+	// A very simple health check.
+	w.Header().Set("Content-Type", "application/json")
+	w.WriteHeader(http.StatusOK)
+
+	// In the future we could report back on the status of our DB, or our cache
+	// (e.g. Redis) by performing a simple PING, and include them in the response.
+	io.WriteString(w, `{"alive": true}`)
+}
+
+func main() {
+	r := mux.NewRouter()
+	r.HandleFunc("/health", HealthCheckHandler)
+
+	log.Fatal(http.ListenAndServe("localhost:8080", r))
+}
+```
+
+Our test code:
+
+```go
+// endpoints_test.go
+package main
+
+import (
+	"net/http"
+	"net/http/httptest"
+	"testing"
+)
+
+func TestHealthCheckHandler(t *testing.T) {
+	// Create a request to pass to our handler. We don't have any query parameters for now, so we'll
+	// pass 'nil' as the third parameter.
+ req, err := http.NewRequest("GET", "/health", nil) + if err != nil { + t.Fatal(err) + } + + // We create a ResponseRecorder (which satisfies http.ResponseWriter) to record the response. + rr := httptest.NewRecorder() + handler := http.HandlerFunc(HealthCheckHandler) + + // Our handlers satisfy http.Handler, so we can call their ServeHTTP method + // directly and pass in our Request and ResponseRecorder. + handler.ServeHTTP(rr, req) + + // Check the status code is what we expect. + if status := rr.Code; status != http.StatusOK { + t.Errorf("handler returned wrong status code: got %v want %v", + status, http.StatusOK) + } + + // Check the response body is what we expect. + expected := `{"alive": true}` + if rr.Body.String() != expected { + t.Errorf("handler returned unexpected body: got %v want %v", + rr.Body.String(), expected) + } +} +``` + +In the case that our routes have [variables](#examples), we can pass those in the request. We could write +[table-driven tests](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) to test multiple +possible route variables as needed. + +```go +// endpoints.go +func main() { + r := mux.NewRouter() + // A route with a route variable: + r.HandleFunc("/metrics/{type}", MetricsHandler) + + log.Fatal(http.ListenAndServe("localhost:8080", r)) +} +``` + +Our test file, with a table-driven test of `routeVariables`: + +```go +// endpoints_test.go +func TestMetricsHandler(t *testing.T) { + tt := []struct{ + routeVariable string + shouldPass bool + }{ + {"goroutines", true}, + {"heap", true}, + {"counters", true}, + {"queries", true}, + {"adhadaeqm3k", false}, + } + + for _, tc := range tt { + path := fmt.Sprintf("/metrics/%s", tc.routeVariable) + req, err := http.NewRequest("GET", path, nil) + if err != nil { + t.Fatal(err) + } + + rr := httptest.NewRecorder() + + // Need to create a router that we can pass the request through so that the vars will be added to the context + router := mux.NewRouter() + router.HandleFunc("/metrics/{type}", MetricsHandler) + router.ServeHTTP(rr, req) + + // In this case, our MetricsHandler returns a non-200 response + // for a route variable it doesn't know about. + if rr.Code == http.StatusOK && !tc.shouldPass { + t.Errorf("handler should have failed on routeVariable %s: got %v want %v", + tc.routeVariable, rr.Code, http.StatusOK) + } + } +} +``` + +## Full Example + +Here's a complete, runnable example of a small `mux` based server: + +```go +package main + +import ( + "net/http" + "log" + "github.com/gorilla/mux" +) + +func YourHandler(w http.ResponseWriter, r *http.Request) { + w.Write([]byte("Gorilla!\n")) +} + +func main() { + r := mux.NewRouter() + // Routes consist of a path and a handler function. + r.HandleFunc("/", YourHandler) + + // Bind to a port and pass our router in + log.Fatal(http.ListenAndServe(":8000", r)) +} +``` + +## License + +BSD licensed. See the LICENSE file for details. diff --git a/vendor/github.com/gorilla/mux/doc.go b/vendor/github.com/gorilla/mux/doc.go new file mode 100644 index 0000000000..bd5a38b55d --- /dev/null +++ b/vendor/github.com/gorilla/mux/doc.go @@ -0,0 +1,306 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package mux implements a request router and dispatcher. + +The name mux stands for "HTTP request multiplexer". 
Like the standard +http.ServeMux, mux.Router matches incoming requests against a list of +registered routes and calls a handler for the route that matches the URL +or other conditions. The main features are: + + * Requests can be matched based on URL host, path, path prefix, schemes, + header and query values, HTTP methods or using custom matchers. + * URL hosts, paths and query values can have variables with an optional + regular expression. + * Registered URLs can be built, or "reversed", which helps maintaining + references to resources. + * Routes can be used as subrouters: nested routes are only tested if the + parent route matches. This is useful to define groups of routes that + share common conditions like a host, a path prefix or other repeated + attributes. As a bonus, this optimizes request matching. + * It implements the http.Handler interface so it is compatible with the + standard http.ServeMux. + +Let's start registering a couple of URL paths and handlers: + + func main() { + r := mux.NewRouter() + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) + } + +Here we register three routes mapping URL paths to handlers. This is +equivalent to how http.HandleFunc() works: if an incoming request URL matches +one of the paths, the corresponding handler is called passing +(http.ResponseWriter, *http.Request) as parameters. + +Paths can have variables. They are defined using the format {name} or +{name:pattern}. If a regular expression pattern is not defined, the matched +variable will be anything until the next slash. For example: + + r := mux.NewRouter() + r.HandleFunc("/products/{key}", ProductHandler) + r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) + r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + +Groups can be used inside patterns, as long as they are non-capturing (?:re). For example: + + r.HandleFunc("/articles/{category}/{sort:(?:asc|desc|new)}", ArticlesCategoryHandler) + +The names are used to create a map of route variables which can be retrieved +calling mux.Vars(): + + vars := mux.Vars(request) + category := vars["category"] + +Note that if any capturing groups are present, mux will panic() during parsing. To prevent +this, convert any capturing groups to non-capturing, e.g. change "/{sort:(asc|desc)}" to +"/{sort:(?:asc|desc)}". This is a change from prior versions which behaved unpredictably +when capturing groups were present. + +And this is all you need to know about the basic usage. More advanced options +are explained below. + +Routes can also be restricted to a domain or subdomain. Just define a host +pattern to be matched. They can also have variables: + + r := mux.NewRouter() + // Only matches if domain is "www.example.com". + r.Host("www.example.com") + // Matches a dynamic subdomain. + r.Host("{subdomain:[a-z]+}.domain.com") + +There are several other matchers that can be added. To match path prefixes: + + r.PathPrefix("/products/") + +...or HTTP methods: + + r.Methods("GET", "POST") + +...or URL schemes: + + r.Schemes("https") + +...or header values: + + r.Headers("X-Requested-With", "XMLHttpRequest") + +...or query values: + + r.Queries("key", "value") + +...or to use a custom matcher function: + + r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 + }) + +...and finally, it is possible to combine several matchers in a single route: + + r.HandleFunc("/products", ProductsHandler). 
+		Host("www.example.com").
+		Methods("GET").
+		Schemes("http")
+
+Setting the same matching conditions again and again can be boring, so we have
+a way to group several routes that share the same requirements.
+We call it "subrouting".
+
+For example, let's say we have several URLs that should only match when the
+host is "www.example.com". Create a route for that host and get a "subrouter"
+from it:
+
+	r := mux.NewRouter()
+	s := r.Host("www.example.com").Subrouter()
+
+Then register routes in the subrouter:
+
+	s.HandleFunc("/products/", ProductsHandler)
+	s.HandleFunc("/products/{key}", ProductHandler)
+	s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+
+The three URL paths we registered above will only be tested if the domain is
+"www.example.com", because the subrouter is tested first. This is not
+only convenient, but also optimizes request matching. You can create
+subrouters combining any attribute matchers accepted by a route.
+
+Subrouters can be used to create domain or path "namespaces": you define
+subrouters in a central place and then parts of the app can register their
+paths relative to a given subrouter.
+
+There's one more thing about subroutes. When a subrouter has a path prefix,
+the inner routes use it as the base for their paths:
+
+	r := mux.NewRouter()
+	s := r.PathPrefix("/products").Subrouter()
+	// "/products/"
+	s.HandleFunc("/", ProductsHandler)
+	// "/products/{key}/"
+	s.HandleFunc("/{key}/", ProductHandler)
+	// "/products/{key}/details"
+	s.HandleFunc("/{key}/details", ProductDetailsHandler)
+
+Note that the path provided to PathPrefix() represents a "wildcard": calling
+PathPrefix("/static/").Handler(...) means that the handler will be passed any
+request that matches "/static/*". This makes it easy to serve static files with mux:
+
+	func main() {
+		var dir string
+
+		flag.StringVar(&dir, "dir", ".", "the directory to serve files from. Defaults to the current dir")
+		flag.Parse()
+		r := mux.NewRouter()
+
+		// This will serve files under http://localhost:8000/static/
+		r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", http.FileServer(http.Dir(dir))))
+
+		srv := &http.Server{
+			Handler: r,
+			Addr:    "127.0.0.1:8000",
+			// Good practice: enforce timeouts for servers you create!
+			WriteTimeout: 15 * time.Second,
+			ReadTimeout:  15 * time.Second,
+		}
+
+		log.Fatal(srv.ListenAndServe())
+	}
+
+Now let's see how to build registered URLs.
+
+Routes can be named. All routes that define a name can have their URLs built,
+or "reversed". We define a name calling Name() on a route. For example:
+
+	r := mux.NewRouter()
+	r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+		Name("article")
+
+To build a URL, get the route and call the URL() method, passing a sequence of
+key/value pairs for the route variables. For the previous route, we would do:
+
+	url, err := r.Get("article").URL("category", "technology", "id", "42")
+
+...and the result will be a url.URL with the following path:
+
+	"/articles/technology/42"
+
+This also works for host and query value variables:
+
+	r := mux.NewRouter()
+	r.Host("{subdomain}.domain.com").
+		Path("/articles/{category}/{id:[0-9]+}").
+		Queries("filter", "{filter}").
+		HandlerFunc(ArticleHandler).
+ Name("article") + + // url.String() will be "http://news.domain.com/articles/technology/42?filter=gorilla" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42", + "filter", "gorilla") + +All variables defined in the route are required, and their values must +conform to the corresponding patterns. These requirements guarantee that a +generated URL will always match a registered route -- the only exception is +for explicitly defined "build-only" routes which never match. + +Regex support also exists for matching Headers within a route. For example, we could do: + + r.HeadersRegexp("Content-Type", "application/(text|json)") + +...and the route will match both requests with a Content-Type of `application/json` as well as +`application/text` + +There's also a way to build only the URL host or path for a route: +use the methods URLHost() or URLPath() instead. For the previous route, +we would do: + + // "http://news.domain.com/" + host, err := r.Get("article").URLHost("subdomain", "news") + + // "/articles/technology/42" + path, err := r.Get("article").URLPath("category", "technology", "id", "42") + +And if you use subrouters, host and path defined separately can be built +as well: + + r := mux.NewRouter() + s := r.Host("{subdomain}.domain.com").Subrouter() + s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + + // "http://news.domain.com/articles/technology/42" + url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") + +Mux supports the addition of middlewares to a Router, which are executed in the order they are added if a match is found, including its subrouters. Middlewares are (typically) small pieces of code which take one request, do something with it, and pass it down to another middleware or the final handler. Some common use cases for middleware are request logging, header manipulation, or ResponseWriter hijacking. + + type MiddlewareFunc func(http.Handler) http.Handler + +Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed to it, and then calls the handler passed as parameter to the MiddlewareFunc (closures can access variables from the context where they are created). + +A very basic middleware which logs the URI of the request being handled could be written as: + + func simpleMw(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Do stuff here + log.Println(r.RequestURI) + // Call the next handler, which can be another middleware in the chain, or the final handler. 
+ next.ServeHTTP(w, r) + }) + } + +Middlewares can be added to a router using `Router.Use()`: + + r := mux.NewRouter() + r.HandleFunc("/", handler) + r.Use(simpleMw) + +A more complex authentication middleware, which maps session token to users, could be written as: + + // Define our struct + type authenticationMiddleware struct { + tokenUsers map[string]string + } + + // Initialize it somewhere + func (amw *authenticationMiddleware) Populate() { + amw.tokenUsers["00000000"] = "user0" + amw.tokenUsers["aaaaaaaa"] = "userA" + amw.tokenUsers["05f717e5"] = "randomUser" + amw.tokenUsers["deadbeef"] = "user0" + } + + // Middleware function, which will be called for each request + func (amw *authenticationMiddleware) Middleware(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + token := r.Header.Get("X-Session-Token") + + if user, found := amw.tokenUsers[token]; found { + // We found the token in our map + log.Printf("Authenticated user %s\n", user) + next.ServeHTTP(w, r) + } else { + http.Error(w, "Forbidden", http.StatusForbidden) + } + }) + } + + r := mux.NewRouter() + r.HandleFunc("/", handler) + + amw := authenticationMiddleware{tokenUsers: make(map[string]string)} + amw.Populate() + + r.Use(amw.Middleware) + +Note: The handler chain will be stopped if your middleware doesn't call `next.ServeHTTP()` with the corresponding parameters. This can be used to abort a request if the middleware writer wants to. + +*/ +package mux diff --git a/vendor/github.com/gorilla/mux/middleware.go b/vendor/github.com/gorilla/mux/middleware.go new file mode 100644 index 0000000000..cb51c565eb --- /dev/null +++ b/vendor/github.com/gorilla/mux/middleware.go @@ -0,0 +1,74 @@ +package mux + +import ( + "net/http" + "strings" +) + +// MiddlewareFunc is a function which receives an http.Handler and returns another http.Handler. +// Typically, the returned handler is a closure which does something with the http.ResponseWriter and http.Request passed +// to it, and then calls the handler passed as parameter to the MiddlewareFunc. +type MiddlewareFunc func(http.Handler) http.Handler + +// middleware interface is anything which implements a MiddlewareFunc named Middleware. +type middleware interface { + Middleware(handler http.Handler) http.Handler +} + +// Middleware allows MiddlewareFunc to implement the middleware interface. +func (mw MiddlewareFunc) Middleware(handler http.Handler) http.Handler { + return mw(handler) +} + +// Use appends a MiddlewareFunc to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) Use(mwf ...MiddlewareFunc) { + for _, fn := range mwf { + r.middlewares = append(r.middlewares, fn) + } +} + +// useInterface appends a middleware to the chain. Middleware can be used to intercept or otherwise modify requests and/or responses, and are executed in the order that they are applied to the Router. +func (r *Router) useInterface(mw middleware) { + r.middlewares = append(r.middlewares, mw) +} + +// CORSMethodMiddleware automatically sets the Access-Control-Allow-Methods response header +// on requests for routes that have an OPTIONS method matcher to all the method matchers on +// the route. Routes that do not explicitly handle OPTIONS requests will not be processed +// by the middleware. See examples for usage. 
+func CORSMethodMiddleware(r *Router) MiddlewareFunc { + return func(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + allMethods, err := getAllMethodsForRoute(r, req) + if err == nil { + for _, v := range allMethods { + if v == http.MethodOptions { + w.Header().Set("Access-Control-Allow-Methods", strings.Join(allMethods, ",")) + } + } + } + + next.ServeHTTP(w, req) + }) + } +} + +// getAllMethodsForRoute returns all the methods from method matchers matching a given +// request. +func getAllMethodsForRoute(r *Router, req *http.Request) ([]string, error) { + var allMethods []string + + for _, route := range r.routes { + var match RouteMatch + if route.Match(req, &match) || match.MatchErr == ErrMethodMismatch { + methods, err := route.GetMethods() + if err != nil { + return nil, err + } + + allMethods = append(allMethods, methods...) + } + } + + return allMethods, nil +} diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go new file mode 100644 index 0000000000..782a34b22a --- /dev/null +++ b/vendor/github.com/gorilla/mux/mux.go @@ -0,0 +1,606 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "context" + "errors" + "fmt" + "net/http" + "path" + "regexp" +) + +var ( + // ErrMethodMismatch is returned when the method in the request does not match + // the method defined against the route. + ErrMethodMismatch = errors.New("method is not allowed") + // ErrNotFound is returned when no route match is found. + ErrNotFound = errors.New("no matching route was found") +) + +// NewRouter returns a new router instance. +func NewRouter() *Router { + return &Router{namedRoutes: make(map[string]*Route)} +} + +// Router registers routes to be matched and dispatches a handler. +// +// It implements the http.Handler interface, so it can be registered to serve +// requests: +// +// var router = mux.NewRouter() +// +// func main() { +// http.Handle("/", router) +// } +// +// Or, for Google App Engine, register it in a init() function: +// +// func init() { +// http.Handle("/", router) +// } +// +// This will send all incoming requests to the router. +type Router struct { + // Configurable Handler to be used when no route matches. + NotFoundHandler http.Handler + + // Configurable Handler to be used when the request method does not match the route. + MethodNotAllowedHandler http.Handler + + // Routes to be matched, in order. + routes []*Route + + // Routes by name for URL building. + namedRoutes map[string]*Route + + // If true, do not clear the request context after handling the request. + // + // Deprecated: No effect, since the context is stored on the request itself. + KeepContext bool + + // Slice of middlewares to be called after a match is found + middlewares []middleware + + // configuration shared with `Route` + routeConf +} + +// common route configuration shared between `Router` and `Route` +type routeConf struct { + // If true, "/path/foo%2Fbar/to" will match the path "/path/{var}/to" + useEncodedPath bool + + // If true, when the path pattern is "/path/", accessing "/path" will + // redirect to the former and vice versa. + strictSlash bool + + // If true, when the path pattern is "/path//to", accessing "/path//to" + // will not redirect + skipClean bool + + // Manager for the variables from host and path. 
+	regexp routeRegexpGroup
+
+	// List of matchers.
+	matchers []matcher
+
+	// The scheme used when building URLs.
+	buildScheme string
+
+	buildVarsFunc BuildVarsFunc
+}
+
+// returns an effective deep copy of `routeConf`
+func copyRouteConf(r routeConf) routeConf {
+	c := r
+
+	if r.regexp.path != nil {
+		c.regexp.path = copyRouteRegexp(r.regexp.path)
+	}
+
+	if r.regexp.host != nil {
+		c.regexp.host = copyRouteRegexp(r.regexp.host)
+	}
+
+	c.regexp.queries = make([]*routeRegexp, 0, len(r.regexp.queries))
+	for _, q := range r.regexp.queries {
+		c.regexp.queries = append(c.regexp.queries, copyRouteRegexp(q))
+	}
+
+	c.matchers = make([]matcher, len(r.matchers))
+	copy(c.matchers, r.matchers)
+
+	return c
+}
+
+func copyRouteRegexp(r *routeRegexp) *routeRegexp {
+	c := *r
+	return &c
+}
+
+// Match attempts to match the given request against the router's registered routes.
+//
+// If the request matches a route of this router or one of its subrouters the Route,
+// Handler, and Vars fields of the match argument are filled and this function
+// returns true.
+//
+// If the request does not match any of this router's or its subrouters' routes
+// then this function returns false. If available, a reason for the match failure
+// will be filled in the match argument's MatchErr field. If the match failure type
+// (eg: not found) has a registered handler, the handler is assigned to the Handler
+// field of the match argument.
+func (r *Router) Match(req *http.Request, match *RouteMatch) bool {
+	for _, route := range r.routes {
+		if route.Match(req, match) {
+			// Build middleware chain if no error was found
+			if match.MatchErr == nil {
+				for i := len(r.middlewares) - 1; i >= 0; i-- {
+					match.Handler = r.middlewares[i].Middleware(match.Handler)
+				}
+			}
+			return true
+		}
+	}
+
+	if match.MatchErr == ErrMethodMismatch {
+		if r.MethodNotAllowedHandler != nil {
+			match.Handler = r.MethodNotAllowedHandler
+			return true
+		}
+
+		return false
+	}
+
+	// Closest match for a router (includes sub-routers)
+	if r.NotFoundHandler != nil {
+		match.Handler = r.NotFoundHandler
+		match.MatchErr = ErrNotFound
+		return true
+	}
+
+	match.MatchErr = ErrNotFound
+	return false
+}
+
+// ServeHTTP dispatches the handler registered in the matched route.
+//
+// When there is a match, the route variables can be retrieved calling
+// mux.Vars(request).
+func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) {
+	if !r.skipClean {
+		path := req.URL.Path
+		if r.useEncodedPath {
+			path = req.URL.EscapedPath()
+		}
+		// Clean path to canonical form and redirect.
+		if p := cleanPath(path); p != path {
+
+			// Added 3 lines (Philip Schlump) - It was dropping the query string and #whatever from query.
+			// This matches with fix in go 1.2 r.c. 4 for same problem. Go Issue:
+			// http://code.google.com/p/go/issues/detail?id=5252
+			url := *req.URL
+			url.Path = p
+			p = url.String()
+
+			w.Header().Set("Location", p)
+			w.WriteHeader(http.StatusMovedPermanently)
+			return
+		}
+	}
+	var match RouteMatch
+	var handler http.Handler
+	if r.Match(req, &match) {
+		handler = match.Handler
+		req = requestWithVars(req, match.Vars)
+		req = requestWithRoute(req, match.Route)
+	}
+
+	if handler == nil && match.MatchErr == ErrMethodMismatch {
+		handler = methodNotAllowedHandler()
+	}
+
+	if handler == nil {
+		handler = http.NotFoundHandler()
+	}
+
+	handler.ServeHTTP(w, req)
+}
+
+// Get returns a route registered with the given name.
+func (r *Router) Get(name string) *Route { + return r.namedRoutes[name] +} + +// GetRoute returns a route registered with the given name. This method +// was renamed to Get() and remains here for backwards compatibility. +func (r *Router) GetRoute(name string) *Route { + return r.namedRoutes[name] +} + +// StrictSlash defines the trailing slash behavior for new routes. The initial +// value is false. +// +// When true, if the route path is "/path/", accessing "/path" will perform a redirect +// to the former and vice versa. In other words, your application will always +// see the path as specified in the route. +// +// When false, if the route path is "/path", accessing "/path/" will not match +// this route and vice versa. +// +// The re-direct is a HTTP 301 (Moved Permanently). Note that when this is set for +// routes with a non-idempotent method (e.g. POST, PUT), the subsequent re-directed +// request will be made as a GET by most clients. Use middleware or client settings +// to modify this behaviour as needed. +// +// Special case: when a route sets a path prefix using the PathPrefix() method, +// strict slash is ignored for that route because the redirect behavior can't +// be determined from a prefix alone. However, any subrouters created from that +// route inherit the original StrictSlash setting. +func (r *Router) StrictSlash(value bool) *Router { + r.strictSlash = value + return r +} + +// SkipClean defines the path cleaning behaviour for new routes. The initial +// value is false. Users should be careful about which routes are not cleaned +// +// When true, if the route path is "/path//to", it will remain with the double +// slash. This is helpful if you have a route like: /fetch/http://xkcd.com/534/ +// +// When false, the path will be cleaned, so /fetch/http://xkcd.com/534/ will +// become /fetch/http/xkcd.com/534 +func (r *Router) SkipClean(value bool) *Router { + r.skipClean = value + return r +} + +// UseEncodedPath tells the router to match the encoded original path +// to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/{var}/to". +// +// If not called, the router will match the unencoded path to the routes. +// For eg. "/path/foo%2Fbar/to" will match the path "/path/foo/bar/to" +func (r *Router) UseEncodedPath() *Router { + r.useEncodedPath = true + return r +} + +// ---------------------------------------------------------------------------- +// Route factories +// ---------------------------------------------------------------------------- + +// NewRoute registers an empty route. +func (r *Router) NewRoute() *Route { + // initialize a route with a copy of the parent router's configuration + route := &Route{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes} + r.routes = append(r.routes, route) + return route +} + +// Name registers a new route with a name. +// See Route.Name(). +func (r *Router) Name(name string) *Route { + return r.NewRoute().Name(name) +} + +// Handle registers a new route with a matcher for the URL path. +// See Route.Path() and Route.Handler(). +func (r *Router) Handle(path string, handler http.Handler) *Route { + return r.NewRoute().Path(path).Handler(handler) +} + +// HandleFunc registers a new route with a matcher for the URL path. +// See Route.Path() and Route.HandlerFunc(). +func (r *Router) HandleFunc(path string, f func(http.ResponseWriter, + *http.Request)) *Route { + return r.NewRoute().Path(path).HandlerFunc(f) +} + +// Headers registers a new route with a matcher for request header values. 
+// See Route.Headers(). +func (r *Router) Headers(pairs ...string) *Route { + return r.NewRoute().Headers(pairs...) +} + +// Host registers a new route with a matcher for the URL host. +// See Route.Host(). +func (r *Router) Host(tpl string) *Route { + return r.NewRoute().Host(tpl) +} + +// MatcherFunc registers a new route with a custom matcher function. +// See Route.MatcherFunc(). +func (r *Router) MatcherFunc(f MatcherFunc) *Route { + return r.NewRoute().MatcherFunc(f) +} + +// Methods registers a new route with a matcher for HTTP methods. +// See Route.Methods(). +func (r *Router) Methods(methods ...string) *Route { + return r.NewRoute().Methods(methods...) +} + +// Path registers a new route with a matcher for the URL path. +// See Route.Path(). +func (r *Router) Path(tpl string) *Route { + return r.NewRoute().Path(tpl) +} + +// PathPrefix registers a new route with a matcher for the URL path prefix. +// See Route.PathPrefix(). +func (r *Router) PathPrefix(tpl string) *Route { + return r.NewRoute().PathPrefix(tpl) +} + +// Queries registers a new route with a matcher for URL query values. +// See Route.Queries(). +func (r *Router) Queries(pairs ...string) *Route { + return r.NewRoute().Queries(pairs...) +} + +// Schemes registers a new route with a matcher for URL schemes. +// See Route.Schemes(). +func (r *Router) Schemes(schemes ...string) *Route { + return r.NewRoute().Schemes(schemes...) +} + +// BuildVarsFunc registers a new route with a custom function for modifying +// route variables before building a URL. +func (r *Router) BuildVarsFunc(f BuildVarsFunc) *Route { + return r.NewRoute().BuildVarsFunc(f) +} + +// Walk walks the router and all its sub-routers, calling walkFn for each route +// in the tree. The routes are walked in the order they were added. Sub-routers +// are explored depth-first. +func (r *Router) Walk(walkFn WalkFunc) error { + return r.walk(walkFn, []*Route{}) +} + +// SkipRouter is used as a return value from WalkFuncs to indicate that the +// router that walk is about to descend down to should be skipped. +var SkipRouter = errors.New("skip this router") + +// WalkFunc is the type of the function called for each route visited by Walk. +// At every invocation, it is given the current route, and the current router, +// and a list of ancestor routes that lead to the current route. +type WalkFunc func(route *Route, router *Router, ancestors []*Route) error + +func (r *Router) walk(walkFn WalkFunc, ancestors []*Route) error { + for _, t := range r.routes { + err := walkFn(t, r, ancestors) + if err == SkipRouter { + continue + } + if err != nil { + return err + } + for _, sr := range t.matchers { + if h, ok := sr.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + if h, ok := t.handler.(*Router); ok { + ancestors = append(ancestors, t) + err := h.walk(walkFn, ancestors) + if err != nil { + return err + } + ancestors = ancestors[:len(ancestors)-1] + } + } + return nil +} + +// ---------------------------------------------------------------------------- +// Context +// ---------------------------------------------------------------------------- + +// RouteMatch stores information about a matched route. 
+type RouteMatch struct { + Route *Route + Handler http.Handler + Vars map[string]string + + // MatchErr is set to appropriate matching error + // It is set to ErrMethodMismatch if there is a mismatch in + // the request method and route method + MatchErr error +} + +type contextKey int + +const ( + varsKey contextKey = iota + routeKey +) + +// Vars returns the route variables for the current request, if any. +func Vars(r *http.Request) map[string]string { + if rv := r.Context().Value(varsKey); rv != nil { + return rv.(map[string]string) + } + return nil +} + +// CurrentRoute returns the matched route for the current request, if any. +// This only works when called inside the handler of the matched route +// because the matched route is stored in the request context which is cleared +// after the handler returns. +func CurrentRoute(r *http.Request) *Route { + if rv := r.Context().Value(routeKey); rv != nil { + return rv.(*Route) + } + return nil +} + +func requestWithVars(r *http.Request, vars map[string]string) *http.Request { + ctx := context.WithValue(r.Context(), varsKey, vars) + return r.WithContext(ctx) +} + +func requestWithRoute(r *http.Request, route *Route) *http.Request { + ctx := context.WithValue(r.Context(), routeKey, route) + return r.WithContext(ctx) +} + +// ---------------------------------------------------------------------------- +// Helpers +// ---------------------------------------------------------------------------- + +// cleanPath returns the canonical path for p, eliminating . and .. elements. +// Borrowed from the net/http package. +func cleanPath(p string) string { + if p == "" { + return "/" + } + if p[0] != '/' { + p = "/" + p + } + np := path.Clean(p) + // path.Clean removes trailing slash except for root; + // put the trailing slash back if necessary. + if p[len(p)-1] == '/' && np != "/" { + np += "/" + } + + return np +} + +// uniqueVars returns an error if two slices contain duplicated strings. +func uniqueVars(s1, s2 []string) error { + for _, v1 := range s1 { + for _, v2 := range s2 { + if v1 == v2 { + return fmt.Errorf("mux: duplicated route variable %q", v2) + } + } + } + return nil +} + +// checkPairs returns the count of strings passed in, and an error if +// the count is not an even number. +func checkPairs(pairs ...string) (int, error) { + length := len(pairs) + if length%2 != 0 { + return length, fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + } + return length, nil +} + +// mapFromPairsToString converts variadic string parameters to a +// string to string map. +func mapFromPairsToString(pairs ...string) (map[string]string, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]string, length/2) + for i := 0; i < length; i += 2 { + m[pairs[i]] = pairs[i+1] + } + return m, nil +} + +// mapFromPairsToRegex converts variadic string parameters to a +// string to regex map. +func mapFromPairsToRegex(pairs ...string) (map[string]*regexp.Regexp, error) { + length, err := checkPairs(pairs...) + if err != nil { + return nil, err + } + m := make(map[string]*regexp.Regexp, length/2) + for i := 0; i < length; i += 2 { + regex, err := regexp.Compile(pairs[i+1]) + if err != nil { + return nil, err + } + m[pairs[i]] = regex + } + return m, nil +} + +// matchInArray returns true if the given string value is in the array. 
+func matchInArray(arr []string, value string) bool { + for _, v := range arr { + if v == value { + return true + } + } + return false +} + +// matchMapWithString returns true if the given key/value pairs exist in a given map. +func matchMapWithString(toCheck map[string]string, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != "" { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v == value { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// matchMapWithRegex returns true if the given key/value pairs exist in a given map compiled against +// the given regex +func matchMapWithRegex(toCheck map[string]*regexp.Regexp, toMatch map[string][]string, canonicalKey bool) bool { + for k, v := range toCheck { + // Check if key exists. + if canonicalKey { + k = http.CanonicalHeaderKey(k) + } + if values := toMatch[k]; values == nil { + return false + } else if v != nil { + // If value was defined as an empty string we only check that the + // key exists. Otherwise we also check for equality. + valueExists := false + for _, value := range values { + if v.MatchString(value) { + valueExists = true + break + } + } + if !valueExists { + return false + } + } + } + return true +} + +// methodNotAllowed replies to the request with an HTTP status code 405. +func methodNotAllowed(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusMethodNotAllowed) +} + +// methodNotAllowedHandler returns a simple request handler +// that replies to each request with a status code 405. +func methodNotAllowedHandler() http.Handler { return http.HandlerFunc(methodNotAllowed) } diff --git a/vendor/github.com/gorilla/mux/regexp.go b/vendor/github.com/gorilla/mux/regexp.go new file mode 100644 index 0000000000..0144842bb2 --- /dev/null +++ b/vendor/github.com/gorilla/mux/regexp.go @@ -0,0 +1,388 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "bytes" + "fmt" + "net/http" + "net/url" + "regexp" + "strconv" + "strings" +) + +type routeRegexpOptions struct { + strictSlash bool + useEncodedPath bool +} + +type regexpType int + +const ( + regexpTypePath regexpType = 0 + regexpTypeHost regexpType = 1 + regexpTypePrefix regexpType = 2 + regexpTypeQuery regexpType = 3 +) + +// newRouteRegexp parses a route template and returns a routeRegexp, +// used to match a host, a path or a query string. +// +// It will extract named variables, assemble a regexp to be matched, create +// a "reverse" template to build URLs and compile regexps to validate variable +// values used in URL building. +// +// Previously we accepted only Python-like identifiers for variable +// names ([a-zA-Z_][a-zA-Z0-9_]*), but currently the only restriction is that +// name and pattern can't be empty, and names can't contain a colon. +func newRouteRegexp(tpl string, typ regexpType, options routeRegexpOptions) (*routeRegexp, error) { + // Check if it is well-formed. + idxs, errBraces := braceIndices(tpl) + if errBraces != nil { + return nil, errBraces + } + // Backup the original. 
+ template := tpl + // Now let's parse it. + defaultPattern := "[^/]+" + if typ == regexpTypeQuery { + defaultPattern = ".*" + } else if typ == regexpTypeHost { + defaultPattern = "[^.]+" + } + // Only match strict slash if not matching + if typ != regexpTypePath { + options.strictSlash = false + } + // Set a flag for strictSlash. + endSlash := false + if options.strictSlash && strings.HasSuffix(tpl, "/") { + tpl = tpl[:len(tpl)-1] + endSlash = true + } + varsN := make([]string, len(idxs)/2) + varsR := make([]*regexp.Regexp, len(idxs)/2) + pattern := bytes.NewBufferString("") + pattern.WriteByte('^') + reverse := bytes.NewBufferString("") + var end int + var err error + for i := 0; i < len(idxs); i += 2 { + // Set all values we are interested in. + raw := tpl[end:idxs[i]] + end = idxs[i+1] + parts := strings.SplitN(tpl[idxs[i]+1:end-1], ":", 2) + name := parts[0] + patt := defaultPattern + if len(parts) == 2 { + patt = parts[1] + } + // Name or pattern can't be empty. + if name == "" || patt == "" { + return nil, fmt.Errorf("mux: missing name or pattern in %q", + tpl[idxs[i]:end]) + } + // Build the regexp pattern. + fmt.Fprintf(pattern, "%s(?P<%s>%s)", regexp.QuoteMeta(raw), varGroupName(i/2), patt) + + // Build the reverse template. + fmt.Fprintf(reverse, "%s%%s", raw) + + // Append variable name and compiled pattern. + varsN[i/2] = name + varsR[i/2], err = regexp.Compile(fmt.Sprintf("^%s$", patt)) + if err != nil { + return nil, err + } + } + // Add the remaining. + raw := tpl[end:] + pattern.WriteString(regexp.QuoteMeta(raw)) + if options.strictSlash { + pattern.WriteString("[/]?") + } + if typ == regexpTypeQuery { + // Add the default pattern if the query value is empty + if queryVal := strings.SplitN(template, "=", 2)[1]; queryVal == "" { + pattern.WriteString(defaultPattern) + } + } + if typ != regexpTypePrefix { + pattern.WriteByte('$') + } + + var wildcardHostPort bool + if typ == regexpTypeHost { + if !strings.Contains(pattern.String(), ":") { + wildcardHostPort = true + } + } + reverse.WriteString(raw) + if endSlash { + reverse.WriteByte('/') + } + // Compile full regexp. + reg, errCompile := regexp.Compile(pattern.String()) + if errCompile != nil { + return nil, errCompile + } + + // Check for capturing groups which used to work in older versions + if reg.NumSubexp() != len(idxs)/2 { + panic(fmt.Sprintf("route %s contains capture groups in its regexp. ", template) + + "Only non-capturing groups are accepted: e.g. (?:pattern) instead of (pattern)") + } + + // Done! + return &routeRegexp{ + template: template, + regexpType: typ, + options: options, + regexp: reg, + reverse: reverse.String(), + varsN: varsN, + varsR: varsR, + wildcardHostPort: wildcardHostPort, + }, nil +} + +// routeRegexp stores a regexp to match a host or path and information to +// collect and validate route variables. +type routeRegexp struct { + // The unmodified template. + template string + // The type of match + regexpType regexpType + // Options for matching + options routeRegexpOptions + // Expanded regexp. + regexp *regexp.Regexp + // Reverse template. + reverse string + // Variable names. + varsN []string + // Variable regexps (validators). + varsR []*regexp.Regexp + // Wildcard host-port (no strict port match in hostname) + wildcardHostPort bool +} + +// Match matches the regexp against the URL host or path. 
+func (r *routeRegexp) Match(req *http.Request, match *RouteMatch) bool { + if r.regexpType == regexpTypeHost { + host := getHost(req) + if r.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } + return r.regexp.MatchString(host) + } + + if r.regexpType == regexpTypeQuery { + return r.matchQueryString(req) + } + path := req.URL.Path + if r.options.useEncodedPath { + path = req.URL.EscapedPath() + } + return r.regexp.MatchString(path) +} + +// url builds a URL part using the given values. +func (r *routeRegexp) url(values map[string]string) (string, error) { + urlValues := make([]interface{}, len(r.varsN), len(r.varsN)) + for k, v := range r.varsN { + value, ok := values[v] + if !ok { + return "", fmt.Errorf("mux: missing route variable %q", v) + } + if r.regexpType == regexpTypeQuery { + value = url.QueryEscape(value) + } + urlValues[k] = value + } + rv := fmt.Sprintf(r.reverse, urlValues...) + if !r.regexp.MatchString(rv) { + // The URL is checked against the full regexp, instead of checking + // individual variables. This is faster but to provide a good error + // message, we check individual regexps if the URL doesn't match. + for k, v := range r.varsN { + if !r.varsR[k].MatchString(values[v]) { + return "", fmt.Errorf( + "mux: variable %q doesn't match, expected %q", values[v], + r.varsR[k].String()) + } + } + } + return rv, nil +} + +// getURLQuery returns a single query parameter from a request URL. +// For a URL with foo=bar&baz=ding, we return only the relevant key +// value pair for the routeRegexp. +func (r *routeRegexp) getURLQuery(req *http.Request) string { + if r.regexpType != regexpTypeQuery { + return "" + } + templateKey := strings.SplitN(r.template, "=", 2)[0] + val, ok := findFirstQueryKey(req.URL.RawQuery, templateKey) + if ok { + return templateKey + "=" + val + } + return "" +} + +// findFirstQueryKey returns the same result as (*url.URL).Query()[key][0]. +// If key was not found, empty string and false is returned. +func findFirstQueryKey(rawQuery, key string) (value string, ok bool) { + query := []byte(rawQuery) + for len(query) > 0 { + foundKey := query + if i := bytes.IndexAny(foundKey, "&;"); i >= 0 { + foundKey, query = foundKey[:i], foundKey[i+1:] + } else { + query = query[:0] + } + if len(foundKey) == 0 { + continue + } + var value []byte + if i := bytes.IndexByte(foundKey, '='); i >= 0 { + foundKey, value = foundKey[:i], foundKey[i+1:] + } + if len(foundKey) < len(key) { + // Cannot possibly be key. + continue + } + keyString, err := url.QueryUnescape(string(foundKey)) + if err != nil { + continue + } + if keyString != key { + continue + } + valueString, err := url.QueryUnescape(string(value)) + if err != nil { + continue + } + return valueString, true + } + return "", false +} + +func (r *routeRegexp) matchQueryString(req *http.Request) bool { + return r.regexp.MatchString(r.getURLQuery(req)) +} + +// braceIndices returns the first level curly brace indices from a string. +// It returns an error in case of unbalanced braces. 
+func braceIndices(s string) ([]int, error) { + var level, idx int + var idxs []int + for i := 0; i < len(s); i++ { + switch s[i] { + case '{': + if level++; level == 1 { + idx = i + } + case '}': + if level--; level == 0 { + idxs = append(idxs, idx, i+1) + } else if level < 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + } + } + if level != 0 { + return nil, fmt.Errorf("mux: unbalanced braces in %q", s) + } + return idxs, nil +} + +// varGroupName builds a capturing group name for the indexed variable. +func varGroupName(idx int) string { + return "v" + strconv.Itoa(idx) +} + +// ---------------------------------------------------------------------------- +// routeRegexpGroup +// ---------------------------------------------------------------------------- + +// routeRegexpGroup groups the route matchers that carry variables. +type routeRegexpGroup struct { + host *routeRegexp + path *routeRegexp + queries []*routeRegexp +} + +// setMatch extracts the variables from the URL once a route matches. +func (v routeRegexpGroup) setMatch(req *http.Request, m *RouteMatch, r *Route) { + // Store host variables. + if v.host != nil { + host := getHost(req) + if v.host.wildcardHostPort { + // Don't be strict on the port match + if i := strings.Index(host, ":"); i != -1 { + host = host[:i] + } + } + matches := v.host.regexp.FindStringSubmatchIndex(host) + if len(matches) > 0 { + extractVars(host, matches, v.host.varsN, m.Vars) + } + } + path := req.URL.Path + if r.useEncodedPath { + path = req.URL.EscapedPath() + } + // Store path variables. + if v.path != nil { + matches := v.path.regexp.FindStringSubmatchIndex(path) + if len(matches) > 0 { + extractVars(path, matches, v.path.varsN, m.Vars) + // Check if we should redirect. + if v.path.options.strictSlash { + p1 := strings.HasSuffix(path, "/") + p2 := strings.HasSuffix(v.path.template, "/") + if p1 != p2 { + u, _ := url.Parse(req.URL.String()) + if p1 { + u.Path = u.Path[:len(u.Path)-1] + } else { + u.Path += "/" + } + m.Handler = http.RedirectHandler(u.String(), http.StatusMovedPermanently) + } + } + } + } + // Store query string variables. + for _, q := range v.queries { + queryURL := q.getURLQuery(req) + matches := q.regexp.FindStringSubmatchIndex(queryURL) + if len(matches) > 0 { + extractVars(queryURL, matches, q.varsN, m.Vars) + } + } +} + +// getHost tries its best to return the request host. +// According to section 14.23 of RFC 2616 the Host header +// can include the port number if the default value of 80 is not used. +func getHost(r *http.Request) string { + if r.URL.IsAbs() { + return r.URL.Host + } + return r.Host +} + +func extractVars(input string, matches []int, names []string, output map[string]string) { + for i, name := range names { + output[name] = input[matches[2*i+2]:matches[2*i+3]] + } +} diff --git a/vendor/github.com/gorilla/mux/route.go b/vendor/github.com/gorilla/mux/route.go new file mode 100644 index 0000000000..750afe570d --- /dev/null +++ b/vendor/github.com/gorilla/mux/route.go @@ -0,0 +1,736 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import ( + "errors" + "fmt" + "net/http" + "net/url" + "regexp" + "strings" +) + +// Route stores information to match a request and build URLs. +type Route struct { + // Request handler for the route. + handler http.Handler + // If true, this route never matches: it is only used to build URLs. 
+ buildOnly bool + // The name used to build URLs. + name string + // Error resulted from building a route. + err error + + // "global" reference to all named routes + namedRoutes map[string]*Route + + // config possibly passed in from `Router` + routeConf +} + +// SkipClean reports whether path cleaning is enabled for this route via +// Router.SkipClean. +func (r *Route) SkipClean() bool { + return r.skipClean +} + +// Match matches the route against the request. +func (r *Route) Match(req *http.Request, match *RouteMatch) bool { + if r.buildOnly || r.err != nil { + return false + } + + var matchErr error + + // Match everything. + for _, m := range r.matchers { + if matched := m.Match(req, match); !matched { + if _, ok := m.(methodMatcher); ok { + matchErr = ErrMethodMismatch + continue + } + + // Ignore ErrNotFound errors. These errors arise from match call + // to Subrouters. + // + // This prevents subsequent matching subrouters from failing to + // run middleware. If not ignored, the middleware would see a + // non-nil MatchErr and be skipped, even when there was a + // matching route. + if match.MatchErr == ErrNotFound { + match.MatchErr = nil + } + + matchErr = nil + return false + } + } + + if matchErr != nil { + match.MatchErr = matchErr + return false + } + + if match.MatchErr == ErrMethodMismatch && r.handler != nil { + // We found a route which matches request method, clear MatchErr + match.MatchErr = nil + // Then override the mis-matched handler + match.Handler = r.handler + } + + // Yay, we have a match. Let's collect some info about it. + if match.Route == nil { + match.Route = r + } + if match.Handler == nil { + match.Handler = r.handler + } + if match.Vars == nil { + match.Vars = make(map[string]string) + } + + // Set variables. + r.regexp.setMatch(req, match, r) + return true +} + +// ---------------------------------------------------------------------------- +// Route attributes +// ---------------------------------------------------------------------------- + +// GetError returns an error resulted from building the route, if any. +func (r *Route) GetError() error { + return r.err +} + +// BuildOnly sets the route to never match: it is only used to build URLs. +func (r *Route) BuildOnly() *Route { + r.buildOnly = true + return r +} + +// Handler -------------------------------------------------------------------- + +// Handler sets a handler for the route. +func (r *Route) Handler(handler http.Handler) *Route { + if r.err == nil { + r.handler = handler + } + return r +} + +// HandlerFunc sets a handler function for the route. +func (r *Route) HandlerFunc(f func(http.ResponseWriter, *http.Request)) *Route { + return r.Handler(http.HandlerFunc(f)) +} + +// GetHandler returns the handler for the route, if any. +func (r *Route) GetHandler() http.Handler { + return r.handler +} + +// Name ----------------------------------------------------------------------- + +// Name sets the name for the route, used to build URLs. +// It is an error to call Name more than once on a route. +func (r *Route) Name(name string) *Route { + if r.name != "" { + r.err = fmt.Errorf("mux: route already has name %q, can't set %q", + r.name, name) + } + if r.err == nil { + r.name = name + r.namedRoutes[name] = r + } + return r +} + +// GetName returns the name for the route, if any. 
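+//
+// For example (rootHandler standing in for any handler func):
+//
+//     r := mux.NewRouter()
+//     route := r.HandleFunc("/", rootHandler).Name("root")
+//     name := route.GetName() // "root"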
+func (r *Route) GetName() string { + return r.name +} + +// ---------------------------------------------------------------------------- +// Matchers +// ---------------------------------------------------------------------------- + +// matcher types try to match a request. +type matcher interface { + Match(*http.Request, *RouteMatch) bool +} + +// addMatcher adds a matcher to the route. +func (r *Route) addMatcher(m matcher) *Route { + if r.err == nil { + r.matchers = append(r.matchers, m) + } + return r +} + +// addRegexpMatcher adds a host or path matcher and builder to a route. +func (r *Route) addRegexpMatcher(tpl string, typ regexpType) error { + if r.err != nil { + return r.err + } + if typ == regexpTypePath || typ == regexpTypePrefix { + if len(tpl) > 0 && tpl[0] != '/' { + return fmt.Errorf("mux: path must start with a slash, got %q", tpl) + } + if r.regexp.path != nil { + tpl = strings.TrimRight(r.regexp.path.template, "/") + tpl + } + } + rr, err := newRouteRegexp(tpl, typ, routeRegexpOptions{ + strictSlash: r.strictSlash, + useEncodedPath: r.useEncodedPath, + }) + if err != nil { + return err + } + for _, q := range r.regexp.queries { + if err = uniqueVars(rr.varsN, q.varsN); err != nil { + return err + } + } + if typ == regexpTypeHost { + if r.regexp.path != nil { + if err = uniqueVars(rr.varsN, r.regexp.path.varsN); err != nil { + return err + } + } + r.regexp.host = rr + } else { + if r.regexp.host != nil { + if err = uniqueVars(rr.varsN, r.regexp.host.varsN); err != nil { + return err + } + } + if typ == regexpTypeQuery { + r.regexp.queries = append(r.regexp.queries, rr) + } else { + r.regexp.path = rr + } + } + r.addMatcher(rr) + return nil +} + +// Headers -------------------------------------------------------------------- + +// headerMatcher matches the request against header values. +type headerMatcher map[string]string + +func (m headerMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithString(m, r.Header, true) +} + +// Headers adds a matcher for request header values. +// It accepts a sequence of key/value pairs to be matched. For example: +// +// r := mux.NewRouter() +// r.Headers("Content-Type", "application/json", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both request header values match. +// If the value is an empty string, it will match any value if the key is set. +func (r *Route) Headers(pairs ...string) *Route { + if r.err == nil { + var headers map[string]string + headers, r.err = mapFromPairsToString(pairs...) + return r.addMatcher(headerMatcher(headers)) + } + return r +} + +// headerRegexMatcher matches the request against the route given a regex for the header +type headerRegexMatcher map[string]*regexp.Regexp + +func (m headerRegexMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchMapWithRegex(m, r.Header, true) +} + +// HeadersRegexp accepts a sequence of key/value pairs, where the value has regex +// support. For example: +// +// r := mux.NewRouter() +// r.HeadersRegexp("Content-Type", "application/(text|json)", +// "X-Requested-With", "XMLHttpRequest") +// +// The above route will only match if both the request header matches both regular expressions. +// If the value is an empty string, it will match any value if the key is set. +// Use the start and end of string anchors (^ and $) to match an exact value. 
+func (r *Route) HeadersRegexp(pairs ...string) *Route { + if r.err == nil { + var headers map[string]*regexp.Regexp + headers, r.err = mapFromPairsToRegex(pairs...) + return r.addMatcher(headerRegexMatcher(headers)) + } + return r +} + +// Host ----------------------------------------------------------------------- + +// Host adds a matcher for the URL host. +// It accepts a template with zero or more URL variables enclosed by {}. +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next dot. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Host("www.example.com") +// r.Host("{subdomain}.domain.com") +// r.Host("{subdomain:[a-z]+}.domain.com") +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Host(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypeHost) + return r +} + +// MatcherFunc ---------------------------------------------------------------- + +// MatcherFunc is the function signature used by custom matchers. +type MatcherFunc func(*http.Request, *RouteMatch) bool + +// Match returns the match for a given request. +func (m MatcherFunc) Match(r *http.Request, match *RouteMatch) bool { + return m(r, match) +} + +// MatcherFunc adds a custom function to be used as request matcher. +func (r *Route) MatcherFunc(f MatcherFunc) *Route { + return r.addMatcher(f) +} + +// Methods -------------------------------------------------------------------- + +// methodMatcher matches the request against HTTP methods. +type methodMatcher []string + +func (m methodMatcher) Match(r *http.Request, match *RouteMatch) bool { + return matchInArray(m, r.Method) +} + +// Methods adds a matcher for HTTP methods. +// It accepts a sequence of one or more methods to be matched, e.g.: +// "GET", "POST", "PUT". +func (r *Route) Methods(methods ...string) *Route { + for k, v := range methods { + methods[k] = strings.ToUpper(v) + } + return r.addMatcher(methodMatcher(methods)) +} + +// Path ----------------------------------------------------------------------- + +// Path adds a matcher for the URL path. +// It accepts a template with zero or more URL variables enclosed by {}. The +// template must start with a "/". +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +// +// For example: +// +// r := mux.NewRouter() +// r.Path("/products/").Handler(ProductsHandler) +// r.Path("/products/{key}").Handler(ProductsHandler) +// r.Path("/articles/{category}/{id:[0-9]+}"). +// Handler(ArticleHandler) +// +// Variable names must be unique in a given route. They can be retrieved +// calling mux.Vars(request). +func (r *Route) Path(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypePath) + return r +} + +// PathPrefix ----------------------------------------------------------------- + +// PathPrefix adds a matcher for the URL path prefix. This matches if the given +// template is a prefix of the full URL path. See Route.Path() for details on +// the tpl argument. +// +// Note that it does not treat slashes specially ("/foobar/" will be matched by +// the prefix "/foo") so you may want to use a trailing slash here. +// +// Also note that the setting of Router.StrictSlash() has no effect on routes +// with a PathPrefix matcher. 
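+//
+// For example, to serve static files under a common prefix (fs standing in
+// for any http.Handler, such as an http.FileServer):
+//
+//     r := mux.NewRouter()
+//     r.PathPrefix("/static/").Handler(http.StripPrefix("/static/", fs))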
+func (r *Route) PathPrefix(tpl string) *Route { + r.err = r.addRegexpMatcher(tpl, regexpTypePrefix) + return r +} + +// Query ---------------------------------------------------------------------- + +// Queries adds a matcher for URL query values. +// It accepts a sequence of key/value pairs. Values may define variables. +// For example: +// +// r := mux.NewRouter() +// r.Queries("foo", "bar", "id", "{id:[0-9]+}") +// +// The above route will only match if the URL contains the defined queries +// values, e.g.: ?foo=bar&id=42. +// +// If the value is an empty string, it will match any value if the key is set. +// +// Variables can define an optional regexp pattern to be matched: +// +// - {name} matches anything until the next slash. +// +// - {name:pattern} matches the given regexp pattern. +func (r *Route) Queries(pairs ...string) *Route { + length := len(pairs) + if length%2 != 0 { + r.err = fmt.Errorf( + "mux: number of parameters must be multiple of 2, got %v", pairs) + return nil + } + for i := 0; i < length; i += 2 { + if r.err = r.addRegexpMatcher(pairs[i]+"="+pairs[i+1], regexpTypeQuery); r.err != nil { + return r + } + } + + return r +} + +// Schemes -------------------------------------------------------------------- + +// schemeMatcher matches the request against URL schemes. +type schemeMatcher []string + +func (m schemeMatcher) Match(r *http.Request, match *RouteMatch) bool { + scheme := r.URL.Scheme + // https://golang.org/pkg/net/http/#Request + // "For [most] server requests, fields other than Path and RawQuery will be + // empty." + // Since we're an http muxer, the scheme is either going to be http or https + // though, so we can just set it based on the tls termination state. + if scheme == "" { + if r.TLS == nil { + scheme = "http" + } else { + scheme = "https" + } + } + return matchInArray(m, scheme) +} + +// Schemes adds a matcher for URL schemes. +// It accepts a sequence of schemes to be matched, e.g.: "http", "https". +// If the request's URL has a scheme set, it will be matched against. +// Generally, the URL scheme will only be set if a previous handler set it, +// such as the ProxyHeaders handler from gorilla/handlers. +// If unset, the scheme will be determined based on the request's TLS +// termination state. +// The first argument to Schemes will be used when constructing a route URL. +func (r *Route) Schemes(schemes ...string) *Route { + for k, v := range schemes { + schemes[k] = strings.ToLower(v) + } + if len(schemes) > 0 { + r.buildScheme = schemes[0] + } + return r.addMatcher(schemeMatcher(schemes)) +} + +// BuildVarsFunc -------------------------------------------------------------- + +// BuildVarsFunc is the function signature used by custom build variable +// functions (which can modify route variables before a route's URL is built). +type BuildVarsFunc func(map[string]string) map[string]string + +// BuildVarsFunc adds a custom function to be used to modify build variables +// before a route's URL is built. +func (r *Route) BuildVarsFunc(f BuildVarsFunc) *Route { + if r.buildVarsFunc != nil { + // compose the old and new functions + old := r.buildVarsFunc + r.buildVarsFunc = func(m map[string]string) map[string]string { + return f(old(m)) + } + } else { + r.buildVarsFunc = f + } + return r +} + +// Subrouter ------------------------------------------------------------------ + +// Subrouter creates a subrouter for the route. +// +// It will test the inner routes only if the parent route matched. 
For example:
+//
+//     r := mux.NewRouter()
+//     s := r.Host("www.example.com").Subrouter()
+//     s.HandleFunc("/products/", ProductsHandler)
+//     s.HandleFunc("/products/{key}", ProductHandler)
+//     s.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler)
+//
+// Here, the routes registered in the subrouter won't be tested if the host
+// doesn't match.
+func (r *Route) Subrouter() *Router {
+	// initialize a subrouter with a copy of the parent route's configuration
+	router := &Router{routeConf: copyRouteConf(r.routeConf), namedRoutes: r.namedRoutes}
+	r.addMatcher(router)
+	return router
+}
+
+// ----------------------------------------------------------------------------
+// URL building
+// ----------------------------------------------------------------------------
+
+// URL builds a URL for the route.
+//
+// It accepts a sequence of key/value pairs for the route variables. For
+// example, given this route:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//         Name("article")
+//
+// ...a URL for it can be built using:
+//
+//     url, err := r.Get("article").URL("category", "technology", "id", "42")
+//
+// ...which will return a url.URL with the following path:
+//
+//     "/articles/technology/42"
+//
+// This also works for host variables:
+//
+//     r := mux.NewRouter()
+//     r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler).
+//         Host("{subdomain}.domain.com").
+//         Name("article")
+//
+//     // url.String() will be "http://news.domain.com/articles/technology/42"
+//     url, err := r.Get("article").URL("subdomain", "news",
+//         "category", "technology",
+//         "id", "42")
+//
+// The scheme of the resulting url will be the first argument that was passed to Schemes:
+//
+//     // url.String() will be "https://example.com"
+//     r := mux.NewRouter()
+//     url, err := r.Host("example.com").
+//         Schemes("https", "http").URL()
+//
+// All variables defined in the route are required, and their values must
+// conform to the corresponding patterns.
+func (r *Route) URL(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	var scheme, host, path string
+	queries := make([]string, 0, len(r.regexp.queries))
+	if r.regexp.host != nil {
+		if host, err = r.regexp.host.url(values); err != nil {
+			return nil, err
+		}
+		scheme = "http"
+		if r.buildScheme != "" {
+			scheme = r.buildScheme
+		}
+	}
+	if r.regexp.path != nil {
+		if path, err = r.regexp.path.url(values); err != nil {
+			return nil, err
+		}
+	}
+	for _, q := range r.regexp.queries {
+		var query string
+		if query, err = q.url(values); err != nil {
+			return nil, err
+		}
+		queries = append(queries, query)
+	}
+	return &url.URL{
+		Scheme:   scheme,
+		Host:     host,
+		Path:     path,
+		RawQuery: strings.Join(queries, "&"),
+	}, nil
+}
+
+// URLHost builds the host part of the URL for a route. See Route.URL().
+//
+// The route must have a host defined.
+func (r *Route) URLHost(pairs ...string) (*url.URL, error) {
+	if r.err != nil {
+		return nil, r.err
+	}
+	if r.regexp.host == nil {
+		return nil, errors.New("mux: route doesn't have a host")
+	}
+	values, err := r.prepareVars(pairs...)
+	if err != nil {
+		return nil, err
+	}
+	host, err := r.regexp.host.url(values)
+	if err != nil {
+		return nil, err
+	}
+	u := &url.URL{
+		Scheme: "http",
+		Host:   host,
+	}
+	if r.buildScheme != "" {
+		u.Scheme = r.buildScheme
+	}
+	return u, nil
+}
+
+// URLPath builds the path part of the URL for a route.
See Route.URL(). +// +// The route must have a path defined. +func (r *Route) URLPath(pairs ...string) (*url.URL, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.path == nil { + return nil, errors.New("mux: route doesn't have a path") + } + values, err := r.prepareVars(pairs...) + if err != nil { + return nil, err + } + path, err := r.regexp.path.url(values) + if err != nil { + return nil, err + } + return &url.URL{ + Path: path, + }, nil +} + +// GetPathTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route doesn't have a path") + } + return r.regexp.path.template, nil +} + +// GetPathRegexp returns the expanded regular expression used to match route path. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a path. +func (r *Route) GetPathRegexp() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.path == nil { + return "", errors.New("mux: route does not have a path") + } + return r.regexp.path.regexp.String(), nil +} + +// GetQueriesRegexp returns the expanded regular expressions used to match the +// route queries. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not have queries. +func (r *Route) GetQueriesRegexp() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + queries := make([]string, 0, len(r.regexp.queries)) + for _, query := range r.regexp.queries { + queries = append(queries, query.regexp.String()) + } + return queries, nil +} + +// GetQueriesTemplates returns the templates used to build the +// query matching. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define queries. +func (r *Route) GetQueriesTemplates() ([]string, error) { + if r.err != nil { + return nil, r.err + } + if r.regexp.queries == nil { + return nil, errors.New("mux: route doesn't have queries") + } + queries := make([]string, 0, len(r.regexp.queries)) + for _, query := range r.regexp.queries { + queries = append(queries, query.template) + } + return queries, nil +} + +// GetMethods returns the methods the route matches against +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if route does not have methods. +func (r *Route) GetMethods() ([]string, error) { + if r.err != nil { + return nil, r.err + } + for _, m := range r.matchers { + if methods, ok := m.(methodMatcher); ok { + return []string(methods), nil + } + } + return nil, errors.New("mux: route doesn't have methods") +} + +// GetHostTemplate returns the template used to build the +// route match. +// This is useful for building simple REST API documentation and for instrumentation +// against third-party services. +// An error will be returned if the route does not define a host. 
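+//
+// For example, a route defined with Host("{subdomain}.domain.com") returns
+// the template string unchanged:
+//
+//     r := mux.NewRouter()
+//     tpl, _ := r.Host("{subdomain}.domain.com").GetHostTemplate()
+//     // tpl == "{subdomain}.domain.com"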
+func (r *Route) GetHostTemplate() (string, error) { + if r.err != nil { + return "", r.err + } + if r.regexp.host == nil { + return "", errors.New("mux: route doesn't have a host") + } + return r.regexp.host.template, nil +} + +// prepareVars converts the route variable pairs into a map. If the route has a +// BuildVarsFunc, it is invoked. +func (r *Route) prepareVars(pairs ...string) (map[string]string, error) { + m, err := mapFromPairsToString(pairs...) + if err != nil { + return nil, err + } + return r.buildVars(m), nil +} + +func (r *Route) buildVars(m map[string]string) map[string]string { + if r.buildVarsFunc != nil { + m = r.buildVarsFunc(m) + } + return m +} diff --git a/vendor/github.com/gorilla/mux/test_helpers.go b/vendor/github.com/gorilla/mux/test_helpers.go new file mode 100644 index 0000000000..5f5c496de0 --- /dev/null +++ b/vendor/github.com/gorilla/mux/test_helpers.go @@ -0,0 +1,19 @@ +// Copyright 2012 The Gorilla Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package mux + +import "net/http" + +// SetURLVars sets the URL variables for the given request, to be accessed via +// mux.Vars for testing route behaviour. Arguments are not modified, a shallow +// copy is returned. +// +// This API should only be used for testing purposes; it provides a way to +// inject variables into the request context. Alternatively, URL variables +// can be set by making a route that captures the required variables, +// starting a server and sending the request to that server. +func SetURLVars(r *http.Request, val map[string]string) *http.Request { + return requestWithVars(r, val) +} diff --git a/vendor/github.com/gosuri/uitable/.travis.yml b/vendor/github.com/gosuri/uitable/.travis.yml new file mode 100644 index 0000000000..26fc3b438b --- /dev/null +++ b/vendor/github.com/gosuri/uitable/.travis.yml @@ -0,0 +1,6 @@ +language: go +sudo: false +install: + - go get ./... +go: + - tip diff --git a/vendor/github.com/gosuri/uitable/LICENSE b/vendor/github.com/gosuri/uitable/LICENSE new file mode 100644 index 0000000000..e436d90439 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/LICENSE @@ -0,0 +1,10 @@ +MIT License +=========== + +Copyright (c) 2015, Greg Osuri + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/gosuri/uitable/Makefile b/vendor/github.com/gosuri/uitable/Makefile
new file mode 100644
index 0000000000..60246a8a93
--- /dev/null
+++ b/vendor/github.com/gosuri/uitable/Makefile
@@ -0,0 +1,4 @@
+test:
+	@go test ./...
+
+.PHONY: test
diff --git a/vendor/github.com/gosuri/uitable/README.md b/vendor/github.com/gosuri/uitable/README.md
new file mode 100644
index 0000000000..683b538d13
--- /dev/null
+++ b/vendor/github.com/gosuri/uitable/README.md
@@ -0,0 +1,67 @@
+# uitable [![GoDoc](https://godoc.org/github.com/gosuri/uitable?status.svg)](https://godoc.org/github.com/gosuri/uitable) [![Build Status](https://travis-ci.org/gosuri/uitable.svg?branch=master)](https://travis-ci.org/gosuri/uitable)
+
+uitable is a Go library for representing data as tables for terminal applications. It provides primitives for sizing and wrapping columns to improve readability.
+
+## Example Usage
+
+Full source code for the example is available at [example/main.go](example/main.go)
+
+```go
+table := uitable.New()
+table.MaxColWidth = 50
+
+table.AddRow("NAME", "BIRTHDAY", "BIO")
+for _, hacker := range hackers {
+	table.AddRow(hacker.Name, hacker.Birthday, hacker.Bio)
+}
+fmt.Println(table)
+```
+
+Will render the data as:
+
+```sh
+NAME          BIRTHDAY           BIO
+Ada Lovelace  December 10, 1815  Ada was a British mathematician and writer, chi...
+Alan Turing   June 23, 1912      Alan was a British pioneering computer scientis...
+```
+
+For wrapping in two columns:
+
+```go
+table = uitable.New()
+table.MaxColWidth = 80
+table.Wrap = true // wrap columns
+
+for _, hacker := range hackers {
+	table.AddRow("Name:", hacker.Name)
+	table.AddRow("Birthday:", hacker.Birthday)
+	table.AddRow("Bio:", hacker.Bio)
+	table.AddRow("") // blank
+}
+fmt.Println(table)
+```
+
+Will render the data as:
+
+```
+Name:     Ada Lovelace
+Birthday: December 10, 1815
+Bio:      Ada was a British mathematician and writer, chiefly known for her work on
+          Charles Babbage's early mechanical general-purpose computer, the Analytical
+          Engine
+
+Name:     Alan Turing
+Birthday: June 23, 1912
+Bio:      Alan was a British pioneering computer scientist, mathematician, logician,
+          cryptanalyst and theoretical biologist
+```
+
+## Installation
+
+```
+$ go get -v github.com/gosuri/uitable
+```
+
+
+[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/gosuri/uitable/trend.png)](https://bitdeli.com/free "Bitdeli Badge")
+
diff --git a/vendor/github.com/gosuri/uitable/table.go b/vendor/github.com/gosuri/uitable/table.go
new file mode 100644
index 0000000000..b764e51502
--- /dev/null
+++ b/vendor/github.com/gosuri/uitable/table.go
@@ -0,0 +1,205 @@
+// Package uitable provides a decorator for formatting data as a table
+package uitable
+
+import (
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/fatih/color"
+	"github.com/gosuri/uitable/util/strutil"
+	"github.com/gosuri/uitable/util/wordwrap"
+)
+
+// Separator is the default column separator
+var Separator = "\t"
+
+// Table represents a decorator that renders the data formatted as a table
+type Table struct {
+	// Rows is the collection of rows in the table
+	Rows []*Row
+
+	// MaxColWidth is the maximum allowed width for cells in the table
+	MaxColWidth uint
+
+	// Wrap when set to true wraps the contents of the columns when the length exceeds the MaxColWidth
+	Wrap bool
+
+	// Separator is the separator for columns in the table. Default is "\t"
+	Separator string
+
+	mtx        *sync.RWMutex
+	rightAlign map[int]bool
+}
+
+// New returns a new Table with default values
+func New() *Table {
+	return &Table{
+		Separator:  Separator,
+		mtx:        new(sync.RWMutex),
+		rightAlign: map[int]bool{},
+	}
+}
+
+// AddRow adds a new row to the table
+func (t *Table) AddRow(data ...interface{}) *Table {
+	t.mtx.Lock()
+	defer t.mtx.Unlock()
+	r := NewRow(data...)
+	t.Rows = append(t.Rows, r)
+	return t
+}
+
+// Bytes returns the []byte value of the table
+func (t *Table) Bytes() []byte {
+	return []byte(t.String())
+}
+
+// RightAlign right-aligns the contents of the given column
+func (t *Table) RightAlign(col int) {
+	t.mtx.Lock()
+	t.rightAlign[col] = true
+	t.mtx.Unlock()
+}
+
+// String returns the string value of the table
+func (t *Table) String() string {
+	t.mtx.RLock()
+	defer t.mtx.RUnlock()
+
+	if len(t.Rows) == 0 {
+		return ""
+	}
+
+	// determine the width for each column (cell in a row)
+	var colwidths []uint
+	for _, row := range t.Rows {
+		for i, cell := range row.Cells {
+			// resize colwidth array
+			if i+1 > len(colwidths) {
+				colwidths = append(colwidths, 0)
+			}
+			cellwidth := cell.LineWidth()
+			if t.MaxColWidth != 0 && cellwidth > t.MaxColWidth {
+				cellwidth = t.MaxColWidth
+			}
+
+			if cellwidth > colwidths[i] {
+				colwidths[i] = cellwidth
+			}
+		}
+	}
+
+	var lines []string
+	for _, row := range t.Rows {
+		row.Separator = t.Separator
+		for i, cell := range row.Cells {
+			cell.Width = colwidths[i]
+			cell.Wrap = t.Wrap
+			cell.RightAlign = t.rightAlign[i]
+		}
+		lines = append(lines, row.String())
+	}
+	return strutil.Join(lines, "\n")
+}
+
+// Row represents a row in a table
+type Row struct {
+	// Cells is the group of cells for the row
+	Cells []*Cell
+
+	// Separator for tabular columns
+	Separator string
+}
+
+// NewRow returns a new Row and adds the data to the row
+func NewRow(data ...interface{}) *Row {
+	r := &Row{Cells: make([]*Cell, len(data))}
+	for i, d := range data {
+		r.Cells[i] = &Cell{Data: d}
+	}
+	return r
+}
+
+// String returns the string representation of the row
+func (r *Row) String() string {
+	// get the max number of lines for each cell
+	var lc int // line count
+	for _, cell := range r.Cells {
+		if clc := len(strings.Split(cell.String(), "\n")); clc > lc {
+			lc = clc
+		}
+	}
+
+	// allocate a two-dimensional array of cells, one row per line, and size them
+	cells := make([][]*Cell, lc)
+	for x := 0; x < lc; x++ {
+		cells[x] = make([]*Cell, len(r.Cells))
+		for y := 0; y < len(r.Cells); y++ {
+			cells[x][y] = &Cell{Width: r.Cells[y].Width}
+		}
+	}
+
+	// insert each line in a cell as a new cell in the cells array
+	for y, cell := range r.Cells {
+		lines := strings.Split(cell.String(), "\n")
+		for x, line := range lines {
+			cells[x][y].Data = line
+		}
+	}
+
+	// format each line
+	lines := make([]string, lc)
+	for x := range lines {
+		line := make([]string, len(cells[x]))
+		for y := range cells[x] {
+			line[y] = cells[x][y].String()
+		}
+		lines[x] = strutil.Join(line, r.Separator)
+	}
+	return strings.Join(lines, "\n")
+}
+
+// Cell represents a column in a row
+type Cell struct {
+	// Width is the width of the cell
+	Width uint
+
+	// Wrap when true wraps the contents of the cell when the length exceeds the width
+	Wrap bool
+
+	// RightAlign when true aligns contents to the right
+	RightAlign bool
+
+	// Data is the cell data
+	Data interface{}
+}
+
+// LineWidth returns the max width of all the lines in a cell
+func (c *Cell) LineWidth() uint {
+	width := 0
+	for _, s := range strings.Split(c.String(), "\n") {
+		w := strutil.StringWidth(s)
+		if w > width {
+			width = w
+		}
+	}
+	return uint(width)
+}
+
+// String returns the formatted string representation of the cell
+func (c *Cell) String() string {
+	if c.Data == nil {
+		return strutil.PadLeft(" ", int(c.Width), ' ')
+	}
+	col := color.New(color.FgBlack)
+	col.DisableColor()
+	s := fmt.Sprintf("%v", col.Sprint(c.Data))
+	if c.Width > 0 {
+		if c.Wrap && uint(len(s)) > c.Width {
+			return wordwrap.WrapString(s, c.Width)
+		} else {
+			return strutil.Resize(s, c.Width, c.RightAlign)
+		}
+	}
+	return s
+}
diff --git a/vendor/github.com/gosuri/uitable/util/strutil/strutil.go b/vendor/github.com/gosuri/uitable/util/strutil/strutil.go
new file mode 100644
index 0000000000..53f30a8fd3
--- /dev/null
+++ b/vendor/github.com/gosuri/uitable/util/strutil/strutil.go
@@ -0,0 +1,101 @@
+// Package strutil provides various utilities for manipulating strings
+package strutil
+
+import (
+	"bytes"
+	"regexp"
+
+	"github.com/mattn/go-runewidth"
+)
+
+const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"
+
+var re = regexp.MustCompile(ansi)
+
+// PadRight returns a new string of a specified length in which the end of the current string is padded with spaces or with a specified pad character.
+func PadRight(str string, length int, pad byte) string {
+	slen := StringWidth(str)
+	if slen >= length {
+		return str
+	}
+	buf := bytes.NewBufferString(str)
+	for i := 0; i < length-slen; i++ {
+		buf.WriteByte(pad)
+	}
+	return buf.String()
+}
+
+// PadLeft returns a new string of a specified length in which the beginning of the current string is padded with spaces or with a specified pad character.
+func PadLeft(str string, length int, pad byte) string {
+	slen := StringWidth(str)
+	if slen >= length {
+		return str
+	}
+	var buf bytes.Buffer
+	for i := 0; i < length-slen; i++ {
+		buf.WriteByte(pad)
+	}
+	buf.WriteString(str)
+	return buf.String()
+}
+
+// Resize resizes the string to the given length. It truncates the string with '...' when its
+// length exceeds the desired length, or pads it with spaces on the right when it is shorter than desired
+func Resize(s string, length uint, rightAlign bool) string {
+	slen := StringWidth(s)
+	n := int(length)
+	if slen == n {
+		return s
+	}
+	// Pad only when the string is shorter than the desired length
+	if rightAlign {
+		s = PadLeft(s, n, ' ')
+	} else {
+		s = PadRight(s, n, ' ')
+	}
+	if slen > n {
+		rs := []rune(s)
+		var buf bytes.Buffer
+		w := 0
+		for _, r := range rs {
+			buf.WriteRune(r)
+			rw := RuneWidth(r)
+			if w+rw >= n-3 {
+				break
+			}
+			w += rw
+		}
+		buf.WriteString("...")
+		s = buf.String()
+	}
+	return s
+}
+
+// Join joins the list of strings with the delim provided.
+// Returns an empty string for an empty list
+func Join(list []string, delim string) string {
+	if len(list) == 0 {
+		return ""
+	}
+	var buf bytes.Buffer
+	for i := 0; i < len(list)-1; i++ {
+		buf.WriteString(list[i] + delim)
+	}
+	buf.WriteString(list[len(list)-1])
+	return buf.String()
+}
+
+// Strip strips the string of all colors
+func Strip(s string) string {
+	return re.ReplaceAllString(s, "")
+}
+
+// StringWidth returns the actual width of the string without colors
+func StringWidth(s string) int {
+	return runewidth.StringWidth(Strip(s))
+}
+
+// RuneWidth returns the actual width of the rune
+func RuneWidth(s rune) int {
+	return runewidth.RuneWidth(s)
+}
diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md b/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md
new file mode 100644
index 0000000000..2298515904
--- /dev/null
+++ b/vendor/github.com/gosuri/uitable/util/wordwrap/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/README.md b/vendor/github.com/gosuri/uitable/util/wordwrap/README.md
new file mode 100644
index 0000000000..60ae311700
--- /dev/null
+++ b/vendor/github.com/gosuri/uitable/util/wordwrap/README.md
@@ -0,0 +1,39 @@
+# go-wordwrap
+
+`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
+automatically wraps words into multiple lines. The primary use case for this
+is in formatting CLI output, but of course word wrapping is a generally useful
+thing to do.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-wordwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-wordwrap
+
+Below is an example of its usage ignoring errors:
+
+```go
+wrapped := wordwrap.WrapString("foo bar baz", 3)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+foo
+bar
+baz
+```
+
+## Word Wrap Algorithm
+
+This library doesn't use any clever algorithm for word wrapping. The wrapping
+is actually very naive: it wraps whenever there is whitespace or an explicit
+linebreak. The goal of this library is word wrapping for CLI output, so the
+input is typically pretty well controlled human language. Because of this,
+the naive approach typically works just fine.
+
+In the future, we'd like to make the algorithm more advanced. We would do
+so without breaking the API.
diff --git a/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go b/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go new file mode 100644 index 0000000000..b2c63b8795 --- /dev/null +++ b/vendor/github.com/gosuri/uitable/util/wordwrap/wordwrap.go @@ -0,0 +1,85 @@ +// Package wordwrap provides methods for wrapping the contents of a string +package wordwrap + +import ( + "bytes" + "unicode" + + "github.com/gosuri/uitable/util/strutil" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + var wordWidth, spaceWidth int + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceWidth) > lim { + current = 0 + } else { + current += uint(spaceWidth) + spaceBuf.WriteTo(buf) + spaceWidth += strutil.StringWidth(buf.String()) + } + spaceBuf.Reset() + spaceWidth = 0 + } else { + current += uint(spaceWidth + wordWidth) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + spaceWidth = 0 + wordWidth = 0 + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceWidth + wordWidth) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + spaceWidth = 0 + wordWidth = 0 + } + + spaceBuf.WriteRune(char) + spaceWidth += strutil.RuneWidth(char) + } else { + wordBuf.WriteRune(char) + wordWidth += strutil.RuneWidth(char) + + if current+uint(spaceWidth+wordWidth) > lim && uint(wordWidth) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + spaceWidth = 0 + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceWidth) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/huandu/xstrings/.gitignore b/vendor/github.com/huandu/xstrings/.gitignore new file mode 100644 index 0000000000..daf913b1b3 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/huandu/xstrings/.travis.yml b/vendor/github.com/huandu/xstrings/.travis.yml new file mode 100644 index 0000000000..d6460be411 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/.travis.yml @@ -0,0 +1,7 @@ +language: go +install: + - go get golang.org/x/tools/cmd/cover + - go get github.com/mattn/goveralls +script: + - go test -v -covermode=count -coverprofile=coverage.out + - 'if [ "$TRAVIS_PULL_REQUEST" = "false" ] && [ ! 
-z "$COVERALLS_TOKEN" ]; then $HOME/gopath/bin/goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN; fi' diff --git a/vendor/github.com/huandu/xstrings/CONTRIBUTING.md b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md new file mode 100644 index 0000000000..d7b4b8d584 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/CONTRIBUTING.md @@ -0,0 +1,23 @@ +# Contributing # + +Thanks for your contribution in advance. No matter what you will contribute to this project, pull request or bug report or feature discussion, it's always highly appreciated. + +## New API or feature ## + +I want to speak more about how to add new functions to this package. + +Package `xstring` is a collection of useful string functions which should be implemented in Go. It's a bit subject to say which function should be included and which should not. I set up following rules in order to make it clear and as objective as possible. + +* Rule 1: Only string algorithm, which takes string as input, can be included. +* Rule 2: If a function has been implemented in package `string`, it must not be included. +* Rule 3: If a function is not language neutral, it must not be included. +* Rule 4: If a function is a part of standard library in other languages, it can be included. +* Rule 5: If a function is quite useful in some famous framework or library, it can be included. + +New function must be discussed in project issues before submitting any code. If a pull request with new functions is sent without any ref issue, it will be rejected. + +## Pull request ## + +Pull request is always welcome. Just make sure you have run `go fmt` and all test cases passed before submit. + +If the pull request is to add a new API or feature, don't forget to update README.md and add new API in function list. diff --git a/vendor/github.com/huandu/xstrings/LICENSE b/vendor/github.com/huandu/xstrings/LICENSE new file mode 100644 index 0000000000..2701772593 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Huan Du + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/vendor/github.com/huandu/xstrings/README.md b/vendor/github.com/huandu/xstrings/README.md new file mode 100644 index 0000000000..292bf2f39e --- /dev/null +++ b/vendor/github.com/huandu/xstrings/README.md @@ -0,0 +1,117 @@ +# xstrings # + +[![Build Status](https://travis-ci.org/huandu/xstrings.svg?branch=master)](https://travis-ci.org/huandu/xstrings) +[![GoDoc](https://godoc.org/github.com/huandu/xstrings?status.svg)](https://godoc.org/github.com/huandu/xstrings) +[![Go Report](https://goreportcard.com/badge/github.com/huandu/xstrings)](https://goreportcard.com/report/github.com/huandu/xstrings) +[![Coverage Status](https://coveralls.io/repos/github/huandu/xstrings/badge.svg?branch=master)](https://coveralls.io/github/huandu/xstrings?branch=master) + +Go package [xstrings](https://godoc.org/github.com/huandu/xstrings) is a collection of string functions, which are widely used in other languages but absent in Go package [strings](http://golang.org/pkg/strings). + +All functions are well tested and carefully tuned for performance. + +## Propose a new function ## + +Please review [contributing guideline](CONTRIBUTING.md) and [create new issue](https://github.com/huandu/xstrings/issues) to state why it should be included. + +## Install ## + +Use `go get` to install this library. + + go get github.com/huandu/xstrings + +## API document ## + +See [GoDoc](https://godoc.org/github.com/huandu/xstrings) for full document. + +## Function list ## + +Go functions have a unique naming style. One, who has experience in other language but new in Go, may have difficulties to find out right string function to use. + +Here is a list of functions in [strings](http://golang.org/pkg/strings) and [xstrings](https://godoc.org/github.com/huandu/xstrings) with enough extra information about how to map these functions to their friends in other languages. Hope this list could be helpful for fresh gophers. 
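+
+For example, here is a quick taste of the case conversion helpers (the
+expected outputs follow the samples documented in this package):
+
+    xstrings.ToSnakeCase("HTTPServer") // "http_server"
+    xstrings.ToKebabCase("FirstName")  // "first-name"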
+ +### Package `xstrings` functions ### + +*Keep this table sorted by Function in ascending order.* + +| Function | Friends | # | +| -------- | ------- | --- | +| [Center](https://godoc.org/github.com/huandu/xstrings#Center) | `str.center` in Python; `String#center` in Ruby | [#30](https://github.com/huandu/xstrings/issues/30) | +| [Count](https://godoc.org/github.com/huandu/xstrings#Count) | `String#count` in Ruby | [#16](https://github.com/huandu/xstrings/issues/16) | +| [Delete](https://godoc.org/github.com/huandu/xstrings#Delete) | `String#delete` in Ruby | [#17](https://github.com/huandu/xstrings/issues/17) | +| [ExpandTabs](https://godoc.org/github.com/huandu/xstrings#ExpandTabs) | `str.expandtabs` in Python | [#27](https://github.com/huandu/xstrings/issues/27) | +| [FirstRuneToLower](https://godoc.org/github.com/huandu/xstrings#FirstRuneToLower) | `lcfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [FirstRuneToUpper](https://godoc.org/github.com/huandu/xstrings#FirstRuneToUpper) | `String#capitalize` in Ruby; `ucfirst` in PHP or Perl | [#15](https://github.com/huandu/xstrings/issues/15) | +| [Insert](https://godoc.org/github.com/huandu/xstrings#Insert) | `String#insert` in Ruby | [#18](https://github.com/huandu/xstrings/issues/18) | +| [LastPartition](https://godoc.org/github.com/huandu/xstrings#LastPartition) | `str.rpartition` in Python; `String#rpartition` in Ruby | [#19](https://github.com/huandu/xstrings/issues/19) | +| [LeftJustify](https://godoc.org/github.com/huandu/xstrings#LeftJustify) | `str.ljust` in Python; `String#ljust` in Ruby | [#28](https://github.com/huandu/xstrings/issues/28) | +| [Len](https://godoc.org/github.com/huandu/xstrings#Len) | `mb_strlen` in PHP | [#23](https://github.com/huandu/xstrings/issues/23) | +| [Partition](https://godoc.org/github.com/huandu/xstrings#Partition) | `str.partition` in Python; `String#partition` in Ruby | [#10](https://github.com/huandu/xstrings/issues/10) | +| [Reverse](https://godoc.org/github.com/huandu/xstrings#Reverse) | `String#reverse` in Ruby; `strrev` in PHP; `reverse` in Perl | [#7](https://github.com/huandu/xstrings/issues/7) | +| [RightJustify](https://godoc.org/github.com/huandu/xstrings#RightJustify) | `str.rjust` in Python; `String#rjust` in Ruby | [#29](https://github.com/huandu/xstrings/issues/29) | +| [RuneWidth](https://godoc.org/github.com/huandu/xstrings#RuneWidth) | - | [#27](https://github.com/huandu/xstrings/issues/27) | +| [Scrub](https://godoc.org/github.com/huandu/xstrings#Scrub) | `String#scrub` in Ruby | [#20](https://github.com/huandu/xstrings/issues/20) | +| [Shuffle](https://godoc.org/github.com/huandu/xstrings#Shuffle) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [ShuffleSource](https://godoc.org/github.com/huandu/xstrings#ShuffleSource) | `str_shuffle` in PHP | [#13](https://github.com/huandu/xstrings/issues/13) | +| [Slice](https://godoc.org/github.com/huandu/xstrings#Slice) | `mb_substr` in PHP | [#9](https://github.com/huandu/xstrings/issues/9) | +| [Squeeze](https://godoc.org/github.com/huandu/xstrings#Squeeze) | `String#squeeze` in Ruby | [#11](https://github.com/huandu/xstrings/issues/11) | +| 
[Successor](https://godoc.org/github.com/huandu/xstrings#Successor) | `String#succ` or `String#next` in Ruby | [#22](https://github.com/huandu/xstrings/issues/22) | +| [SwapCase](https://godoc.org/github.com/huandu/xstrings#SwapCase) | `str.swapcase` in Python; `String#swapcase` in Ruby | [#12](https://github.com/huandu/xstrings/issues/12) | +| [ToCamelCase](https://godoc.org/github.com/huandu/xstrings#ToCamelCase) | `String#camelize` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [ToKebab](https://godoc.org/github.com/huandu/xstrings#ToKebabCase) | - | [#41](https://github.com/huandu/xstrings/issues/41) | +| [ToSnakeCase](https://godoc.org/github.com/huandu/xstrings#ToSnakeCase) | `String#underscore` in RoR | [#1](https://github.com/huandu/xstrings/issues/1) | +| [Translate](https://godoc.org/github.com/huandu/xstrings#Translate) | `str.translate` in Python; `String#tr` in Ruby; `strtr` in PHP; `tr///` in Perl | [#21](https://github.com/huandu/xstrings/issues/21) | +| [Width](https://godoc.org/github.com/huandu/xstrings#Width) | `mb_strwidth` in PHP | [#26](https://github.com/huandu/xstrings/issues/26) | +| [WordCount](https://godoc.org/github.com/huandu/xstrings#WordCount) | `str_word_count` in PHP | [#14](https://github.com/huandu/xstrings/issues/14) | +| [WordSplit](https://godoc.org/github.com/huandu/xstrings#WordSplit) | - | [#14](https://github.com/huandu/xstrings/issues/14) | + +### Package `strings` functions ### + +*Keep this table sorted by Function in ascending order.* + +| Function | Friends | +| -------- | ------- | +| [Contains](http://golang.org/pkg/strings/#Contains) | `String#include?` in Ruby | +| [ContainsAny](http://golang.org/pkg/strings/#ContainsAny) | - | +| [ContainsRune](http://golang.org/pkg/strings/#ContainsRune) | - | +| [Count](http://golang.org/pkg/strings/#Count) | `str.count` in Python; `substr_count` in PHP | +| [EqualFold](http://golang.org/pkg/strings/#EqualFold) | `stricmp` in PHP; `String#casecmp` in Ruby | +| [Fields](http://golang.org/pkg/strings/#Fields) | `str.split` in Python; `split` in Perl; `String#split` in Ruby | +| [FieldsFunc](http://golang.org/pkg/strings/#FieldsFunc) | - | +| [HasPrefix](http://golang.org/pkg/strings/#HasPrefix) | `str.startswith` in Python; `String#start_with?` in Ruby | +| [HasSuffix](http://golang.org/pkg/strings/#HasSuffix) | `str.endswith` in Python; `String#end_with?` in Ruby | +| [Index](http://golang.org/pkg/strings/#Index) | `str.index` in Python; `String#index` in Ruby; `strpos` in PHP; `index` in Perl | +| [IndexAny](http://golang.org/pkg/strings/#IndexAny) | - | +| [IndexByte](http://golang.org/pkg/strings/#IndexByte) | - | +| [IndexFunc](http://golang.org/pkg/strings/#IndexFunc) | - | +| [IndexRune](http://golang.org/pkg/strings/#IndexRune) | - | +| [Join](http://golang.org/pkg/strings/#Join) | `str.join` in Python; `Array#join` in Ruby; `implode` in PHP; `join` in Perl | +| [LastIndex](http://golang.org/pkg/strings/#LastIndex) | `str.rindex` in Python; `String#rindex`; `strrpos` in PHP; `rindex` in Perl | +| [LastIndexAny](http://golang.org/pkg/strings/#LastIndexAny) | - | +| [LastIndexFunc](http://golang.org/pkg/strings/#LastIndexFunc) | - | +| [Map](http://golang.org/pkg/strings/#Map) | `String#each_codepoint` in Ruby | +| [Repeat](http://golang.org/pkg/strings/#Repeat) | operator `*` in Python and Ruby; `str_repeat` in PHP | +| 
[Replace](http://golang.org/pkg/strings/#Replace) | `str.replace` in Python; `String#sub` in Ruby; `str_replace` in PHP | +| [Split](http://golang.org/pkg/strings/#Split) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [SplitAfter](http://golang.org/pkg/strings/#SplitAfter) | - | +| [SplitAfterN](http://golang.org/pkg/strings/#SplitAfterN) | - | +| [SplitN](http://golang.org/pkg/strings/#SplitN) | `str.split` in Python; `String#split` in Ruby; `explode` in PHP; `split` in Perl | +| [Title](http://golang.org/pkg/strings/#Title) | `str.title` in Python | +| [ToLower](http://golang.org/pkg/strings/#ToLower) | `str.lower` in Python; `String#downcase` in Ruby; `strtolower` in PHP; `lc` in Perl | +| [ToLowerSpecial](http://golang.org/pkg/strings/#ToLowerSpecial) | - | +| [ToTitle](http://golang.org/pkg/strings/#ToTitle) | - | +| [ToTitleSpecial](http://golang.org/pkg/strings/#ToTitleSpecial) | - | +| [ToUpper](http://golang.org/pkg/strings/#ToUpper) | `str.upper` in Python; `String#upcase` in Ruby; `strtoupper` in PHP; `uc` in Perl | +| [ToUpperSpecial](http://golang.org/pkg/strings/#ToUpperSpecial) | - | +| [Trim](http://golang.org/pkg/strings/#Trim) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimFunc](http://golang.org/pkg/strings/#TrimFunc) | - | +| [TrimLeft](http://golang.org/pkg/strings/#TrimLeft) | `str.lstrip` in Python; `String#lstrip` in Ruby; `ltrim` in PHP | +| [TrimLeftFunc](http://golang.org/pkg/strings/#TrimLeftFunc) | - | +| [TrimPrefix](http://golang.org/pkg/strings/#TrimPrefix) | - | +| [TrimRight](http://golang.org/pkg/strings/#TrimRight) | `str.rstrip` in Python; `String#rstrip` in Ruby; `rtrim` in PHP | +| [TrimRightFunc](http://golang.org/pkg/strings/#TrimRightFunc) | - | +| [TrimSpace](http://golang.org/pkg/strings/#TrimSpace) | `str.strip` in Python; `String#strip` in Ruby; `trim` in PHP | +| [TrimSuffix](http://golang.org/pkg/strings/#TrimSuffix) | `String#chomp` in Ruby; `chomp` in Perl | + +## License ## + +This library is licensed under MIT license. See LICENSE for details. diff --git a/vendor/github.com/huandu/xstrings/common.go b/vendor/github.com/huandu/xstrings/common.go new file mode 100644 index 0000000000..f427cc84e2 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/common.go @@ -0,0 +1,21 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +const bufferMaxInitGrowSize = 2048 + +// Lazy initialize a buffer. +func allocBuffer(orig, cur string) *stringBuilder { + output := &stringBuilder{} + maxSize := len(orig) * 4 + + // Avoid to reserve too much memory at once. + if maxSize > bufferMaxInitGrowSize { + maxSize = bufferMaxInitGrowSize + } + + output.Grow(maxSize) + output.WriteString(orig[:len(orig)-len(cur)]) + return output +} diff --git a/vendor/github.com/huandu/xstrings/convert.go b/vendor/github.com/huandu/xstrings/convert.go new file mode 100644 index 0000000000..3d5a34950b --- /dev/null +++ b/vendor/github.com/huandu/xstrings/convert.go @@ -0,0 +1,590 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "math/rand" + "unicode" + "unicode/utf8" +) + +// ToCamelCase is to convert words separated by space, underscore and hyphen to camel case. +// +// Some samples. 
+// "some_words" => "SomeWords" +// "http_server" => "HttpServer" +// "no_https" => "NoHttps" +// "_complex__case_" => "_Complex_Case_" +// "some words" => "SomeWords" +func ToCamelCase(str string) string { + if len(str) == 0 { + return "" + } + + buf := &stringBuilder{} + var r0, r1 rune + var size int + + // leading connector will appear in output. + for len(str) > 0 { + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if !isConnector(r0) { + r0 = unicode.ToUpper(r0) + break + } + + buf.WriteRune(r0) + } + + if len(str) == 0 { + // A special case for a string contains only 1 rune. + if size != 0 { + buf.WriteRune(r0) + } + + return buf.String() + } + + for len(str) > 0 { + r1 = r0 + r0, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r0) && isConnector(r1) { + buf.WriteRune(r1) + continue + } + + if isConnector(r1) { + r0 = unicode.ToUpper(r0) + } else { + r0 = unicode.ToLower(r0) + buf.WriteRune(r1) + } + } + + buf.WriteRune(r0) + return buf.String() +} + +// ToSnakeCase can convert all upper case characters in a string to +// snake case format. +// +// Some samples. +// "FirstName" => "first_name" +// "HTTPServer" => "http_server" +// "NoHTTPS" => "no_https" +// "GO_PATH" => "go_path" +// "GO PATH" => "go_path" // space is converted to underscore. +// "GO-PATH" => "go_path" // hyphen is converted to underscore. +// "http2xx" => "http_2xx" // insert an underscore before a number and after an alphabet. +// "HTTP20xOK" => "http_20x_ok" +// "Duration2m3s" => "duration_2m3s" +// "Bld4Floor3rd" => "bld4_floor_3rd" +func ToSnakeCase(str string) string { + return camelCaseToLowerCase(str, '_') +} + +// ToKebabCase can convert all upper case characters in a string to +// kebab case format. +// +// Some samples. +// "FirstName" => "first-name" +// "HTTPServer" => "http-server" +// "NoHTTPS" => "no-https" +// "GO_PATH" => "go-path" +// "GO PATH" => "go-path" // space is converted to '-'. +// "GO-PATH" => "go-path" // hyphen is converted to '-'. +// "http2xx" => "http-2xx" // insert an underscore before a number and after an alphabet. +// "HTTP20xOK" => "http-20x-ok" +// "Duration2m3s" => "duration-2m3s" +// "Bld4Floor3rd" => "bld4-floor-3rd" +func ToKebabCase(str string) string { + return camelCaseToLowerCase(str, '-') +} + +func camelCaseToLowerCase(str string, connector rune) string { + if len(str) == 0 { + return "" + } + + buf := &stringBuilder{} + wt, word, remaining := nextWord(str) + + for len(remaining) > 0 { + if wt != connectorWord { + toLower(buf, wt, word, connector) + } + + prev := wt + last := word + wt, word, remaining = nextWord(remaining) + + switch prev { + case numberWord: + for wt == alphabetWord || wt == numberWord { + toLower(buf, wt, word, connector) + wt, word, remaining = nextWord(remaining) + } + + if wt != invalidWord && wt != punctWord { + buf.WriteRune(connector) + } + + case connectorWord: + toLower(buf, prev, last, connector) + + case punctWord: + // nothing. + + default: + if wt != numberWord { + if wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + + break + } + + if len(remaining) == 0 { + break + } + + last := word + wt, word, remaining = nextWord(remaining) + + // consider number as a part of previous word. + // e.g. 
"Bld4Floor" => "bld4_floor" + if wt != alphabetWord { + toLower(buf, numberWord, last, connector) + + if wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + + break + } + + // if there are some lower case letters following a number, + // add connector before the number. + // e.g. "HTTP2xx" => "http_2xx" + buf.WriteRune(connector) + toLower(buf, numberWord, last, connector) + + for wt == alphabetWord || wt == numberWord { + toLower(buf, wt, word, connector) + wt, word, remaining = nextWord(remaining) + } + + if wt != invalidWord && wt != connectorWord && wt != punctWord { + buf.WriteRune(connector) + } + } + } + + toLower(buf, wt, word, connector) + return buf.String() +} + +func isConnector(r rune) bool { + return r == '-' || r == '_' || unicode.IsSpace(r) +} + +type wordType int + +const ( + invalidWord wordType = iota + numberWord + upperCaseWord + alphabetWord + connectorWord + punctWord + otherWord +) + +func nextWord(str string) (wt wordType, word, remaining string) { + if len(str) == 0 { + return + } + + var offset int + remaining = str + r, size := nextValidRune(remaining, utf8.RuneError) + offset += size + + if r == utf8.RuneError { + wt = invalidWord + word = str[:offset] + remaining = str[offset:] + return + } + + switch { + case isConnector(r): + wt = connectorWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isConnector(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsPunct(r): + wt = punctWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsPunct(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsUpper(r): + wt = upperCaseWord + remaining = remaining[size:] + + if len(remaining) == 0 { + break + } + + r, size = nextValidRune(remaining, r) + + switch { + case unicode.IsUpper(r): + prevSize := size + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsUpper(r) { + break + } + + prevSize = size + offset += size + remaining = remaining[size:] + } + + // it's a bit complex when dealing with a case like "HTTPStatus". + // it's expected to be splitted into "HTTP" and "Status". + // Therefore "S" should be in remaining instead of word. 
+ if len(remaining) > 0 && isAlphabet(r) { + offset -= prevSize + remaining = str[offset:] + } + + case isAlphabet(r): + offset += size + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + case isAlphabet(r): + wt = alphabetWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !isAlphabet(r) || unicode.IsUpper(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + case unicode.IsNumber(r): + wt = numberWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if !unicode.IsNumber(r) { + break + } + + offset += size + remaining = remaining[size:] + } + + default: + wt = otherWord + remaining = remaining[size:] + + for len(remaining) > 0 { + r, size = nextValidRune(remaining, r) + + if size == 0 || isConnector(r) || isAlphabet(r) || unicode.IsNumber(r) || unicode.IsPunct(r) { + break + } + + offset += size + remaining = remaining[size:] + } + } + + word = str[:offset] + return +} + +func nextValidRune(str string, prev rune) (r rune, size int) { + var sz int + + for len(str) > 0 { + r, sz = utf8.DecodeRuneInString(str) + size += sz + + if r != utf8.RuneError { + return + } + + str = str[sz:] + } + + r = prev + return +} + +func toLower(buf *stringBuilder, wt wordType, str string, connector rune) { + buf.Grow(buf.Len() + len(str)) + + if wt != upperCaseWord && wt != connectorWord { + buf.WriteString(str) + return + } + + for len(str) > 0 { + r, size := utf8.DecodeRuneInString(str) + str = str[size:] + + if isConnector(r) { + buf.WriteRune(connector) + } else if unicode.IsUpper(r) { + buf.WriteRune(unicode.ToLower(r)) + } else { + buf.WriteRune(r) + } + } +} + +// SwapCase will swap characters case from upper to lower or lower to upper. +func SwapCase(str string) string { + var r rune + var size int + + buf := &stringBuilder{} + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case unicode.IsUpper(r): + buf.WriteRune(unicode.ToLower(r)) + + case unicode.IsLower(r): + buf.WriteRune(unicode.ToUpper(r)) + + default: + buf.WriteRune(r) + } + + str = str[size:] + } + + return buf.String() +} + +// FirstRuneToUpper converts first rune to upper case if necessary. +func FirstRuneToUpper(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsLower(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToUpper(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// FirstRuneToLower converts first rune to lower case if necessary. +func FirstRuneToLower(str string) string { + if str == "" { + return str + } + + r, size := utf8.DecodeRuneInString(str) + + if !unicode.IsUpper(r) { + return str + } + + buf := &stringBuilder{} + buf.WriteRune(unicode.ToLower(r)) + buf.WriteString(str[size:]) + return buf.String() +} + +// Shuffle randomizes runes in a string and returns the result. +// It uses default random source in `math/rand`. +func Shuffle(str string) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + + for i := len(runes) - 1; i > 0; i-- { + index = rand.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// ShuffleSource randomizes runes in a string with given random source. 
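As an illustrative aside, the conversion and case helpers above can be exercised end to end; this is a minimal sketch (not part of the vendored files), with expected outputs taken from the doc-comment samples and using the vendored import path:

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	// Case conversion, matching the samples in the doc comments above.
	fmt.Println(xstrings.ToCamelCase("http_server")) // HttpServer
	fmt.Println(xstrings.ToSnakeCase("HTTPServer"))  // http_server
	fmt.Println(xstrings.ToKebabCase("FirstName"))   // first-name

	// Rune-wise case helpers.
	fmt.Println(xstrings.SwapCase("Hello, World"))  // hELLO, wORLD
	fmt.Println(xstrings.FirstRuneToUpper("hello")) // Hello
}
```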
+func ShuffleSource(str string, src rand.Source) string { + if str == "" { + return str + } + + runes := []rune(str) + index := 0 + r := rand.New(src) + + for i := len(runes) - 1; i > 0; i-- { + index = r.Intn(i + 1) + + if i != index { + runes[i], runes[index] = runes[index], runes[i] + } + } + + return string(runes) +} + +// Successor returns the successor to string. +// +// If there is one alphanumeric rune is found in string, increase the rune by 1. +// If increment generates a "carry", the rune to the left of it is incremented. +// This process repeats until there is no carry, adding an additional rune if necessary. +// +// If there is no alphanumeric rune, the rightmost rune will be increased by 1 +// regardless whether the result is a valid rune or not. +// +// Only following characters are alphanumeric. +// * a - z +// * A - Z +// * 0 - 9 +// +// Samples (borrowed from ruby's String#succ document): +// "abcd" => "abce" +// "THX1138" => "THX1139" +// "<>" => "<>" +// "1999zzz" => "2000aaa" +// "ZZZ9999" => "AAAA0000" +// "***" => "**+" +func Successor(str string) string { + if str == "" { + return str + } + + var r rune + var i int + carry := ' ' + runes := []rune(str) + l := len(runes) + lastAlphanumeric := l + + for i = l - 1; i >= 0; i-- { + r = runes[i] + + if ('a' <= r && r <= 'y') || + ('A' <= r && r <= 'Y') || + ('0' <= r && r <= '8') { + runes[i]++ + carry = ' ' + lastAlphanumeric = i + break + } + + switch r { + case 'z': + runes[i] = 'a' + carry = 'a' + lastAlphanumeric = i + + case 'Z': + runes[i] = 'A' + carry = 'A' + lastAlphanumeric = i + + case '9': + runes[i] = '0' + carry = '0' + lastAlphanumeric = i + } + } + + // Needs to add one character for carry. + if i < 0 && carry != ' ' { + buf := &stringBuilder{} + buf.Grow(l + 4) // Reserve enough space for write. + + if lastAlphanumeric != 0 { + buf.WriteString(str[:lastAlphanumeric]) + } + + buf.WriteRune(carry) + + for _, r = range runes[lastAlphanumeric:] { + buf.WriteRune(r) + } + + return buf.String() + } + + // No alphanumeric character. Simply increase last rune's value. + if lastAlphanumeric == l { + runes[l-1]++ + } + + return string(runes) +} diff --git a/vendor/github.com/huandu/xstrings/count.go b/vendor/github.com/huandu/xstrings/count.go new file mode 100644 index 0000000000..f96e38703a --- /dev/null +++ b/vendor/github.com/huandu/xstrings/count.go @@ -0,0 +1,120 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +// Len returns str's utf8 rune length. +func Len(str string) int { + return utf8.RuneCountInString(str) +} + +// WordCount returns number of words in a string. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordCount(str string) int { + var r rune + var size, n int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + n++ + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + inWord = false + } + + str = str[size:] + } + + return n +} + +const minCJKCharacter = '\u3400' + +// Checks r is a letter but not CJK character. +func isAlphabet(r rune) bool { + if !unicode.IsLetter(r) { + return false + } + + switch { + // Quick check for non-CJK character. 
+	case r < minCJKCharacter:
+		return true
+
+	// Common CJK characters.
+	case r >= '\u4E00' && r <= '\u9FCC':
+		return false
+
+	// Rare CJK characters.
+	case r >= '\u3400' && r <= '\u4D85':
+		return false
+
+	// Rare and historic CJK characters.
+	case r >= '\U00020000' && r <= '\U0002B81D':
+		return false
+	}
+
+	return true
+}
+
+// Width returns string width in monotype font.
+// Multi-byte characters are usually twice the width of single byte characters.
+//
+// Algorithm comes from `mb_strwidth` in PHP.
+// http://php.net/manual/en/function.mb-strwidth.php
+func Width(str string) int {
+	var r rune
+	var size, n int
+
+	for len(str) > 0 {
+		r, size = utf8.DecodeRuneInString(str)
+		n += RuneWidth(r)
+		str = str[size:]
+	}
+
+	return n
+}
+
+// RuneWidth returns character width in monotype font.
+// Multi-byte characters are usually twice the width of single byte characters.
+//
+// Algorithm comes from `mb_strwidth` in PHP.
+// http://php.net/manual/en/function.mb-strwidth.php
+func RuneWidth(r rune) int {
+	switch {
+	case r == utf8.RuneError || r < '\x20':
+		return 0
+
+	case '\x20' <= r && r < '\u2000':
+		return 1
+
+	case '\u2000' <= r && r < '\uFF61':
+		return 2
+
+	case '\uFF61' <= r && r < '\uFFA0':
+		return 1
+
+	case '\uFFA0' <= r:
+		return 2
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/huandu/xstrings/doc.go b/vendor/github.com/huandu/xstrings/doc.go
new file mode 100644
index 0000000000..1a6ef069f6
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/doc.go
@@ -0,0 +1,8 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+// Package xstrings provides string algorithms which are useful but not included in the `strings` package.
+// See the project home page for details: https://github.com/huandu/xstrings
+//
+// Package xstrings assumes all strings are encoded in utf8.
+package xstrings
diff --git a/vendor/github.com/huandu/xstrings/format.go b/vendor/github.com/huandu/xstrings/format.go
new file mode 100644
index 0000000000..8cd76c525c
--- /dev/null
+++ b/vendor/github.com/huandu/xstrings/format.go
@@ -0,0 +1,169 @@
+// Copyright 2015 Huan Du. All rights reserved.
+// Licensed under the MIT license that can be found in the LICENSE file.
+
+package xstrings
+
+import (
+	"unicode/utf8"
+)
+
+// ExpandTabs expands tab runes ('\t') in str to one or more spaces depending on
+// the current column and tabSize.
+// The column number is reset to zero after each newline ('\n') occurring in str.
+//
+// ExpandTabs uses RuneWidth to decide a rune's width.
+// For example, CJK characters will be treated as two characters.
+//
+// If tabSize <= 0, ExpandTabs panics with an error.
+// +// Samples: +// ExpandTabs("a\tbc\tdef\tghij\tk", 4) => "a bc def ghij k" +// ExpandTabs("abcdefg\thij\nk\tl", 4) => "abcdefg hij\nk l" +// ExpandTabs("z中\t文\tw", 4) => "z中 文 w" +func ExpandTabs(str string, tabSize int) string { + if tabSize <= 0 { + panic("tab size must be positive") + } + + var r rune + var i, size, column, expand int + var output *stringBuilder + + orig := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == '\t' { + expand = tabSize - column%tabSize + + if output == nil { + output = allocBuffer(orig, str) + } + + for i = 0; i < expand; i++ { + output.WriteRune(' ') + } + + column += expand + } else { + if r == '\n' { + column = 0 + } else { + column += RuneWidth(r) + } + + if output != nil { + output.WriteRune(r) + } + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} + +// LeftJustify returns a string with pad string at right side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// LeftJustify("hello", 4, " ") => "hello" +// LeftJustify("hello", 10, " ") => "hello " +// LeftJustify("hello", 10, "123") => "hello12312" +func LeftJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + output.WriteString(str) + writePadString(output, pad, padLen, remains) + return output.String() +} + +// RightJustify returns a string with pad string at left side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. +// +// Samples: +// RightJustify("hello", 4, " ") => "hello" +// RightJustify("hello", 10, " ") => " hello" +// RightJustify("hello", 10, "123") => "12312hello" +func RightJustify(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains) + output.WriteString(str) + return output.String() +} + +// Center returns a string with pad string at both side if str's rune length is smaller than length. +// If str's rune length is larger than length, str itself will be returned. +// +// If pad is an empty string, str will be returned. 
+// +// Samples: +// Center("hello", 4, " ") => "hello" +// Center("hello", 10, " ") => " hello " +// Center("hello", 10, "123") => "12hello123" +func Center(str string, length int, pad string) string { + l := Len(str) + + if l >= length || pad == "" { + return str + } + + remains := length - l + padLen := Len(pad) + + output := &stringBuilder{} + output.Grow(len(str) + (remains/padLen+1)*len(pad)) + writePadString(output, pad, padLen, remains/2) + output.WriteString(str) + writePadString(output, pad, padLen, (remains+1)/2) + return output.String() +} + +func writePadString(output *stringBuilder, pad string, padLen, remains int) { + var r rune + var size int + + repeats := remains / padLen + + for i := 0; i < repeats; i++ { + output.WriteString(pad) + } + + remains = remains % padLen + + if remains != 0 { + for i := 0; i < remains; i++ { + r, size = utf8.DecodeRuneInString(pad) + output.WriteRune(r) + pad = pad[size:] + } + } +} diff --git a/vendor/github.com/huandu/xstrings/manipulate.go b/vendor/github.com/huandu/xstrings/manipulate.go new file mode 100644 index 0000000000..64075f9bb8 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/manipulate.go @@ -0,0 +1,216 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. + +package xstrings + +import ( + "strings" + "unicode/utf8" +) + +// Reverse a utf8 encoded string. +func Reverse(str string) string { + var size int + + tail := len(str) + buf := make([]byte, tail) + s := buf + + for len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + tail -= size + s = append(s[:tail], []byte(str[:size])...) + str = str[size:] + } + + return string(buf) +} + +// Slice a string by rune. +// +// Start must satisfy 0 <= start <= rune length. +// +// End can be positive, zero or negative. +// If end >= 0, start and end must satisfy start <= end <= rune length. +// If end < 0, it means slice to the end of string. +// +// Otherwise, Slice will panic as out of range. +func Slice(str string, start, end int) string { + var size, startPos, endPos int + + origin := str + + if start < 0 || end > len(str) || (end >= 0 && start > end) { + panic("out of range") + } + + if end >= 0 { + end -= start + } + + for start > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + start-- + startPos += size + str = str[size:] + } + + if end < 0 { + return origin[startPos:] + } + + endPos = startPos + + for end > 0 && len(str) > 0 { + _, size = utf8.DecodeRuneInString(str) + end-- + endPos += size + str = str[size:] + } + + if len(str) == 0 && (start > 0 || end > 0) { + panic("out of range") + } + + return origin[startPos:endPos] +} + +// Partition splits a string by sep into three parts. +// The return value is a slice of strings with head, match and tail. +// +// If str contains sep, for example "hello" and "l", Partition returns +// "he", "l", "lo" +// +// If str doesn't contain sep, for example "hello" and "x", Partition returns +// "hello", "", "" +func Partition(str, sep string) (head, match, tail string) { + index := strings.Index(str, sep) + + if index == -1 { + head = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// LastPartition splits a string by last instance of sep into three parts. +// The return value is a slice of strings with head, match and tail. 
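A short, illustrative sketch of the padding and measuring helpers above (not part of the vendored files; expected outputs follow the doc-comment samples and the RuneWidth rules):

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	// Tab expansion is column-aware; a tab grows to the next multiple of tabSize.
	fmt.Printf("%q\n", xstrings.ExpandTabs("a\tbc\tdef\tghij\tk", 4)) // "a   bc  def ghij    k"

	// Justification repeats the pad string until the rune length is reached.
	fmt.Println(xstrings.LeftJustify("hello", 10, "123")) // hello12312
	fmt.Println(xstrings.Center("hello", 10, "123"))      // 12hello123

	// Width counts CJK runes as two columns; WordCount counts alphabetic words.
	fmt.Println(xstrings.Width("z中文w"))             // 6
	fmt.Println(xstrings.WordCount("one two-three")) // 2
}
```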
+// +// If str contains sep, for example "hello" and "l", LastPartition returns +// "hel", "l", "o" +// +// If str doesn't contain sep, for example "hello" and "x", LastPartition returns +// "", "", "hello" +func LastPartition(str, sep string) (head, match, tail string) { + index := strings.LastIndex(str, sep) + + if index == -1 { + tail = str + return + } + + head = str[:index] + match = str[index : index+len(sep)] + tail = str[index+len(sep):] + return +} + +// Insert src into dst at given rune index. +// Index is counted by runes instead of bytes. +// +// If index is out of range of dst, panic with out of range. +func Insert(dst, src string, index int) string { + return Slice(dst, 0, index) + src + Slice(dst, index, -1) +} + +// Scrub scrubs invalid utf8 bytes with repl string. +// Adjacent invalid bytes are replaced only once. +func Scrub(str, repl string) string { + var buf *stringBuilder + var r rune + var size, pos int + var hasError bool + + origin := str + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + if r == utf8.RuneError { + if !hasError { + if buf == nil { + buf = &stringBuilder{} + } + + buf.WriteString(origin[:pos]) + hasError = true + } + } else if hasError { + hasError = false + buf.WriteString(repl) + + origin = origin[pos:] + pos = 0 + } + + pos += size + str = str[size:] + } + + if buf != nil { + buf.WriteString(origin) + return buf.String() + } + + // No invalid byte. + return origin +} + +// WordSplit splits a string into words. Returns a slice of words. +// If there is no word in a string, return nil. +// +// Word is defined as a locale dependent string containing alphabetic characters, +// which may also contain but not start with `'` and `-` characters. +func WordSplit(str string) []string { + var word string + var words []string + var r rune + var size, pos int + + inWord := false + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + switch { + case isAlphabet(r): + if !inWord { + inWord = true + word = str + pos = 0 + } + + case inWord && (r == '\'' || r == '-'): + // Still in word. + + default: + if inWord { + inWord = false + words = append(words, word[:pos]) + } + } + + pos += size + str = str[size:] + } + + if inWord { + words = append(words, word[:pos]) + } + + return words +} diff --git a/vendor/github.com/huandu/xstrings/stringbuilder.go b/vendor/github.com/huandu/xstrings/stringbuilder.go new file mode 100644 index 0000000000..bb0919d32f --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder.go @@ -0,0 +1,7 @@ +//+build go1.10 + +package xstrings + +import "strings" + +type stringBuilder = strings.Builder diff --git a/vendor/github.com/huandu/xstrings/stringbuilder_go110.go b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go new file mode 100644 index 0000000000..dac389d139 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/stringbuilder_go110.go @@ -0,0 +1,9 @@ +//+build !go1.10 + +package xstrings + +import "bytes" + +type stringBuilder struct { + bytes.Buffer +} diff --git a/vendor/github.com/huandu/xstrings/translate.go b/vendor/github.com/huandu/xstrings/translate.go new file mode 100644 index 0000000000..42e694fb17 --- /dev/null +++ b/vendor/github.com/huandu/xstrings/translate.go @@ -0,0 +1,546 @@ +// Copyright 2015 Huan Du. All rights reserved. +// Licensed under the MIT license that can be found in the LICENSE file. 
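The manipulation helpers in manipulate.go index by rune rather than by byte; an illustrative sketch (not part of the vendored files, outputs derived from the doc comments above):

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	fmt.Println(xstrings.Reverse("reverse")) // esrever

	// Slice and Insert count positions in runes, not bytes.
	fmt.Println(xstrings.Slice("他nice儿", 1, 3))    // ni
	fmt.Println(xstrings.Insert("abcdef", "XY", 3)) // abcXYdef

	// Partition splits around the first match of sep.
	head, match, tail := xstrings.Partition("hello", "l")
	fmt.Println(head, match, tail) // he l lo

	// WordSplit keeps mid-word apostrophes and hyphens.
	fmt.Println(xstrings.WordSplit("one two-three")) // [one two-three]
}
```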
+ +package xstrings + +import ( + "unicode" + "unicode/utf8" +) + +type runeRangeMap struct { + FromLo rune // Lower bound of range map. + FromHi rune // An inclusive higher bound of range map. + ToLo rune + ToHi rune +} + +type runeDict struct { + Dict [unicode.MaxASCII + 1]rune +} + +type runeMap map[rune]rune + +// Translator can translate string with pre-compiled from and to patterns. +// If a from/to pattern pair needs to be used more than once, it's recommended +// to create a Translator and reuse it. +type Translator struct { + quickDict *runeDict // A quick dictionary to look up rune by index. Only available for latin runes. + runeMap runeMap // Rune map for translation. + ranges []*runeRangeMap // Ranges of runes. + mappedRune rune // If mappedRune >= 0, all matched runes are translated to the mappedRune. + reverted bool // If to pattern is empty, all matched characters will be deleted. + hasPattern bool +} + +// NewTranslator creates new Translator through a from/to pattern pair. +func NewTranslator(from, to string) *Translator { + tr := &Translator{} + + if from == "" { + return tr + } + + reverted := from[0] == '^' + deletion := len(to) == 0 + + if reverted { + from = from[1:] + } + + var fromStart, fromEnd, fromRangeStep rune + var toStart, toEnd, toRangeStep rune + var fromRangeSize, toRangeSize rune + var singleRunes []rune + + // Update the to rune range. + updateRange := func() { + // No more rune to read in the to rune pattern. + if toEnd == utf8.RuneError { + return + } + + if toRangeStep == 0 { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, toEnd) + return + } + + // Current range is not empty. Consume 1 rune from start. + if toStart != toEnd { + toStart += toRangeStep + return + } + + // No more rune. Repeat the last rune. + if to == "" { + toEnd = utf8.RuneError + return + } + + // Both start and end are used. Read two more runes from the to pattern. + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + + if deletion { + toStart = utf8.RuneError + toEnd = utf8.RuneError + } else { + // If from pattern is reverted, only the last rune in the to pattern will be used. + if reverted { + var size int + + for len(to) > 0 { + toStart, size = utf8.DecodeRuneInString(to) + to = to[size:] + } + + toEnd = utf8.RuneError + } else { + to, toStart, toEnd, toRangeStep = nextRuneRange(to, utf8.RuneError) + } + } + + fromEnd = utf8.RuneError + + for len(from) > 0 { + from, fromStart, fromEnd, fromRangeStep = nextRuneRange(from, fromEnd) + + // fromStart is a single character. Just map it with a rune in the to pattern. + if fromRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + continue + } + + for toEnd != utf8.RuneError && fromStart != fromEnd { + // If mapped rune is a single character instead of a range, simply shift first + // rune in the range. + if toRangeStep == 0 { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + fromStart += fromRangeStep + continue + } + + fromRangeSize = (fromEnd - fromStart) * fromRangeStep + toRangeSize = (toEnd - toStart) * toRangeStep + + // Not enough runes in the to pattern. Need to read more. + if fromRangeSize > toRangeSize { + fromStart, toStart = tr.addRuneRange(fromStart, fromStart+toRangeSize*fromRangeStep, toStart, toEnd, singleRunes) + fromStart += fromRangeStep + updateRange() + + // Edge case: If fromRangeSize == toRangeSize + 1, the last fromStart value needs be considered + // as a single rune. 
+ if fromStart == fromEnd { + singleRunes = tr.addRune(fromStart, toStart, singleRunes) + updateRange() + } + + continue + } + + fromStart, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart+fromRangeSize*toRangeStep, singleRunes) + updateRange() + break + } + + if fromStart == fromEnd { + fromEnd = utf8.RuneError + continue + } + + _, toStart = tr.addRuneRange(fromStart, fromEnd, toStart, toStart, singleRunes) + fromEnd = utf8.RuneError + } + + if fromEnd != utf8.RuneError { + tr.addRune(fromEnd, toStart, singleRunes) + } + + tr.reverted = reverted + tr.mappedRune = -1 + tr.hasPattern = true + + // Translate RuneError only if in deletion or reverted mode. + if deletion || reverted { + tr.mappedRune = toStart + } + + return tr +} + +func (tr *Translator) addRune(from, to rune, singleRunes []rune) []rune { + if from <= unicode.MaxASCII { + if tr.quickDict == nil { + tr.quickDict = &runeDict{} + } + + tr.quickDict.Dict[from] = to + } else { + if tr.runeMap == nil { + tr.runeMap = make(runeMap) + } + + tr.runeMap[from] = to + } + + singleRunes = append(singleRunes, from) + return singleRunes +} + +func (tr *Translator) addRuneRange(fromLo, fromHi, toLo, toHi rune, singleRunes []rune) (rune, rune) { + var r rune + var rrm *runeRangeMap + + if fromLo < fromHi { + rrm = &runeRangeMap{ + FromLo: fromLo, + FromHi: fromHi, + ToLo: toLo, + ToHi: toHi, + } + } else { + rrm = &runeRangeMap{ + FromLo: fromHi, + FromHi: fromLo, + ToLo: toHi, + ToHi: toLo, + } + } + + // If there is any single rune conflicts with this rune range, clear single rune record. + for _, r = range singleRunes { + if rrm.FromLo <= r && r <= rrm.FromHi { + if r <= unicode.MaxASCII { + tr.quickDict.Dict[r] = 0 + } else { + delete(tr.runeMap, r) + } + } + } + + tr.ranges = append(tr.ranges, rrm) + return fromHi, toHi +} + +func nextRuneRange(str string, last rune) (remaining string, start, end rune, rangeStep rune) { + var r rune + var size int + + remaining = str + escaping := false + isRange := false + + for len(remaining) > 0 { + r, size = utf8.DecodeRuneInString(remaining) + remaining = remaining[size:] + + // Parse special characters. + if !escaping { + if r == '\\' { + escaping = true + continue + } + + if r == '-' { + // Ignore slash at beginning of string. + if last == utf8.RuneError { + continue + } + + start = last + isRange = true + continue + } + } + + escaping = false + + if last != utf8.RuneError { + // This is a range which start and end are the same. + // Considier it as a normal character. + if isRange && last == r { + isRange = false + continue + } + + start = last + end = r + + if isRange { + if start < end { + rangeStep = 1 + } else { + rangeStep = -1 + } + } + + return + } + + last = r + } + + start = last + end = utf8.RuneError + return +} + +// Translate str with a from/to pattern pair. +// +// See comment in Translate function for usage and samples. +func (tr *Translator) Translate(str string) string { + if !tr.hasPattern || str == "" { + return str + } + + var r rune + var size int + var needTr bool + + orig := str + + var output *stringBuilder + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + r, needTr = tr.TranslateRune(r) + + if needTr && output == nil { + output = allocBuffer(orig, str) + } + + if r != utf8.RuneError && output != nil { + output.WriteRune(r) + } + + str = str[size:] + } + + // No character is translated. + if output == nil { + return orig + } + + return output.String() +} + +// TranslateRune return translated rune and true if r matches the from pattern. 
+// If r doesn't match the pattern, original r is returned and translated is false. +func (tr *Translator) TranslateRune(r rune) (result rune, translated bool) { + switch { + case tr.quickDict != nil: + if r <= unicode.MaxASCII { + result = tr.quickDict.Dict[r] + + if result != 0 { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + } + + fallthrough + + case tr.runeMap != nil: + var ok bool + + if result, ok = tr.runeMap[r]; ok { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + } + + break + } + + fallthrough + + default: + var rrm *runeRangeMap + ranges := tr.ranges + + for i := len(ranges) - 1; i >= 0; i-- { + rrm = ranges[i] + + if rrm.FromLo <= r && r <= rrm.FromHi { + translated = true + + if tr.mappedRune >= 0 { + result = tr.mappedRune + break + } + + if rrm.ToLo < rrm.ToHi { + result = rrm.ToLo + r - rrm.FromLo + } else if rrm.ToLo > rrm.ToHi { + // ToHi can be smaller than ToLo if range is from higher to lower. + result = rrm.ToLo - r + rrm.FromLo + } else { + result = rrm.ToLo + } + + break + } + } + } + + if tr.reverted { + if !translated { + result = tr.mappedRune + } + + translated = !translated + } + + if !translated { + result = r + } + + return +} + +// HasPattern returns true if Translator has one pattern at least. +func (tr *Translator) HasPattern() bool { + return tr.hasPattern +} + +// Translate str with the characters defined in from replaced by characters defined in to. +// +// From and to are patterns representing a set of characters. Pattern is defined as following. +// +// * Special characters +// * '-' means a range of runes, e.g. +// * "a-z" means all characters from 'a' to 'z' inclusive; +// * "z-a" means all characters from 'z' to 'a' inclusive. +// * '^' as first character means a set of all runes excepted listed, e.g. +// * "^a-z" means all characters except 'a' to 'z' inclusive. +// * '\' escapes special characters. +// * Normal character represents itself, e.g. "abc" is a set including 'a', 'b' and 'c'. +// +// Translate will try to find a 1:1 mapping from from to to. +// If to is smaller than from, last rune in to will be used to map "out of range" characters in from. +// +// Note that '^' only works in the from pattern. It will be considered as a normal character in the to pattern. +// +// If the to pattern is an empty string, Translate works exactly the same as Delete. +// +// Samples: +// Translate("hello", "aeiou", "12345") => "h2ll4" +// Translate("hello", "a-z", "A-Z") => "HELLO" +// Translate("hello", "z-a", "a-z") => "svool" +// Translate("hello", "aeiou", "*") => "h*ll*" +// Translate("hello", "^l", "*") => "**ll*" +// Translate("hello ^ world", `\^lo`, "*") => "he*** * w*r*d" +func Translate(str, from, to string) string { + tr := NewTranslator(from, to) + return tr.Translate(str) +} + +// Delete runes in str matching the pattern. +// Pattern is defined in Translate function. +// +// Samples: +// Delete("hello", "aeiou") => "hll" +// Delete("hello", "a-k") => "llo" +// Delete("hello", "^a-k") => "he" +func Delete(str, pattern string) string { + tr := NewTranslator(pattern, "") + return tr.Translate(str) +} + +// Count how many runes in str match the pattern. +// Pattern is defined in Translate function. 
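The pattern syntax above drives Translate, Delete, Count, and Squeeze; a small sketch (not part of the vendored files), reusing a compiled Translator as the Translator doc comment recommends:

```go
package main

import (
	"fmt"

	"github.com/huandu/xstrings"
)

func main() {
	// One-shot helpers compile the pattern on every call.
	fmt.Println(xstrings.Translate("hello", "a-z", "A-Z")) // HELLO
	fmt.Println(xstrings.Delete("hello", "aeiou"))         // hll

	// For repeated use, compile the from/to pair once and reuse it.
	tr := xstrings.NewTranslator("aeiou", "*")
	for _, s := range []string{"hello", "world"} {
		fmt.Println(tr.Translate(s)) // h*ll*, then w*rld
	}
}
```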
+// +// Samples: +// Count("hello", "aeiou") => 3 +// Count("hello", "a-k") => 3 +// Count("hello", "^a-k") => 2 +func Count(str, pattern string) int { + if pattern == "" || str == "" { + return 0 + } + + var r rune + var size int + var matched bool + + tr := NewTranslator(pattern, "") + cnt := 0 + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + str = str[size:] + + if _, matched = tr.TranslateRune(r); matched { + cnt++ + } + } + + return cnt +} + +// Squeeze deletes adjacent repeated runes in str. +// If pattern is not empty, only runes matching the pattern will be squeezed. +// +// Samples: +// Squeeze("hello", "") => "helo" +// Squeeze("hello", "m-z") => "hello" +// Squeeze("hello world", " ") => "hello world" +func Squeeze(str, pattern string) string { + var last, r rune + var size int + var skipSqueeze, matched bool + var tr *Translator + var output *stringBuilder + + orig := str + last = -1 + + if len(pattern) > 0 { + tr = NewTranslator(pattern, "") + } + + for len(str) > 0 { + r, size = utf8.DecodeRuneInString(str) + + // Need to squeeze the str. + if last == r && !skipSqueeze { + if tr != nil { + if _, matched = tr.TranslateRune(r); !matched { + skipSqueeze = true + } + } + + if output == nil { + output = allocBuffer(orig, str) + } + + if skipSqueeze { + output.WriteRune(r) + } + } else { + if output != nil { + output.WriteRune(r) + } + + last = r + skipSqueeze = false + } + + str = str[size:] + } + + if output == nil { + return orig + } + + return output.String() +} diff --git a/vendor/github.com/jmoiron/sqlx/.gitignore b/vendor/github.com/jmoiron/sqlx/.gitignore new file mode 100644 index 0000000000..b2be23c87f --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +.idea + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +tags +environ diff --git a/vendor/github.com/jmoiron/sqlx/.travis.yml b/vendor/github.com/jmoiron/sqlx/.travis.yml new file mode 100644 index 0000000000..1cfa28cb35 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/.travis.yml @@ -0,0 +1,26 @@ +# vim: ft=yaml sw=2 ts=2 + +language: go + +# enable database services +services: + - mysql + - postgresql + +# create test database +before_install: + - mysql -e 'CREATE DATABASE IF NOT EXISTS sqlxtest;' + - psql -c 'create database sqlxtest;' -U postgres + - go get github.com/mattn/goveralls + - export SQLX_MYSQL_DSN="travis:@/sqlxtest?parseTime=true" + - export SQLX_POSTGRES_DSN="postgres://postgres:@localhost/sqlxtest?sslmode=disable" + - export SQLX_SQLITE_DSN="$HOME/sqlxtest.db" + +# go versions to test +go: + - "1.15.x" + - "1.16.x" + +# run tests w/ coverage +script: + - travis_retry $GOPATH/bin/goveralls -service=travis-ci diff --git a/vendor/github.com/jmoiron/sqlx/README.md b/vendor/github.com/jmoiron/sqlx/README.md new file mode 100644 index 0000000000..0d7159290d --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/README.md @@ -0,0 +1,213 @@ +# sqlx + +[![Build Status](https://travis-ci.org/jmoiron/sqlx.svg?branch=master)](https://travis-ci.org/jmoiron/sqlx) [![Coverage Status](https://coveralls.io/repos/github/jmoiron/sqlx/badge.svg?branch=master)](https://coveralls.io/github/jmoiron/sqlx?branch=master) 
[![Godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/jmoiron/sqlx) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/jmoiron/sqlx/master/LICENSE)
+
+sqlx is a library which provides a set of extensions on Go's standard
+`database/sql` library. The sqlx versions of `sql.DB`, `sql.Tx`, `sql.Stmt`,
+et al. all leave the underlying interfaces untouched, so that their interfaces
+are a superset of the standard ones. This makes it relatively painless to
+integrate existing codebases using database/sql with sqlx.
+
+Major additional concepts are:
+
+* Marshal rows into structs (with embedded struct support), maps, and slices
+* Named parameter support including prepared statements
+* `Get` and `Select` to go quickly from query to struct/slice
+
+In addition to the [godoc API documentation](http://godoc.org/github.com/jmoiron/sqlx),
+there is also some [user documentation](http://jmoiron.github.io/sqlx/) that
+explains how to use `database/sql` along with sqlx.
+
+## Recent Changes
+
+1.3.0:
+
+* `sqlx.DB.Connx(context.Context) *sqlx.Conn`
+* `sqlx.BindDriver(driverName, bindType)`
+* support for `[]map[string]interface{}` to do "batch" insertions
+* allocation & perf improvements for `sqlx.In`
+
+DB.Connx returns an `sqlx.Conn`, which is an `sql.Conn`-alike consistent with
+sqlx's wrapping of other types.
+
+`BindDriver` allows users to control the bindvars that sqlx will use for drivers,
+and add new drivers at runtime. This results in a very slight performance hit
+when resolving the driver into a bind type (~40ns per call), but it allows users
+to specify what bindtype their driver uses even when sqlx has not been updated
+to know about it by default.
+
+### Backwards Compatibility
+
+Compatibility with the most recent two versions of Go is a requirement for any
+new changes. Compatibility beyond that is not guaranteed.
+
+Versioning is done with Go modules. Breaking changes (e.g. removing deprecated API)
+will get major version number bumps.
+
+## install
+
+    go get github.com/jmoiron/sqlx
+
+## issues
+
+Row headers can be ambiguous (`SELECT 1 AS a, 2 AS a`), and the result of
+`Columns()` does not fully qualify column names in queries like:
+
+```sql
+SELECT a.id, a.name, b.id, b.name FROM foos AS a JOIN foos AS b ON a.parent = b.id;
+```
+
+making a struct or map destination ambiguous. Use `AS` in your queries
+to give columns distinct names, `rows.Scan` to scan them manually, or
+`SliceScan` to get a slice of results.
+
+## usage
+
+Below is an example which shows some common use cases for sqlx. Check
+[sqlx_test.go](https://github.com/jmoiron/sqlx/blob/master/sqlx_test.go) for more
+usage.
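The `Connx` and `BindDriver` additions called out in Recent Changes combine roughly as follows; this is a hedged sketch (not from the README), where the driver name `"mydriver"` and the DSN are hypothetical placeholders:

```go
package main

import (
	"context"
	"log"

	"github.com/jmoiron/sqlx"
)

func main() {
	// Tell sqlx which bindvar style an unknown driver uses.
	// "mydriver" is a hypothetical name used only for illustration.
	sqlx.BindDriver("mydriver", sqlx.DOLLAR)

	db, err := sqlx.Connect("mydriver", "host=example dbname=demo") // hypothetical DSN
	if err != nil {
		log.Fatalln(err)
	}

	// Connx hands back a single connection wrapped as *sqlx.Conn.
	conn, err := db.Connx(context.Background())
	if err != nil {
		log.Fatalln(err)
	}
	defer conn.Close()
}
```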
+ + +```go +package main + +import ( + "database/sql" + "fmt" + "log" + + _ "github.com/lib/pq" + "github.com/jmoiron/sqlx" +) + +var schema = ` +CREATE TABLE person ( + first_name text, + last_name text, + email text +); + +CREATE TABLE place ( + country text, + city text NULL, + telcode integer +)` + +type Person struct { + FirstName string `db:"first_name"` + LastName string `db:"last_name"` + Email string +} + +type Place struct { + Country string + City sql.NullString + TelCode int +} + +func main() { + // this Pings the database trying to connect + // use sqlx.Open() for sql.Open() semantics + db, err := sqlx.Connect("postgres", "user=foo dbname=bar sslmode=disable") + if err != nil { + log.Fatalln(err) + } + + // exec the schema or fail; multi-statement Exec behavior varies between + // database drivers; pq will exec them all, sqlite3 won't, ymmv + db.MustExec(schema) + + tx := db.MustBegin() + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "Jason", "Moiron", "jmoiron@jmoiron.net") + tx.MustExec("INSERT INTO person (first_name, last_name, email) VALUES ($1, $2, $3)", "John", "Doe", "johndoeDNE@gmail.net") + tx.MustExec("INSERT INTO place (country, city, telcode) VALUES ($1, $2, $3)", "United States", "New York", "1") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Hong Kong", "852") + tx.MustExec("INSERT INTO place (country, telcode) VALUES ($1, $2)", "Singapore", "65") + // Named queries can use structs, so if you have an existing struct (i.e. person := &Person{}) that you have populated, you can pass it in as &person + tx.NamedExec("INSERT INTO person (first_name, last_name, email) VALUES (:first_name, :last_name, :email)", &Person{"Jane", "Citizen", "jane.citzen@example.com"}) + tx.Commit() + + // Query the database, storing results in a []Person (wrapped in []interface{}) + people := []Person{} + db.Select(&people, "SELECT * FROM person ORDER BY first_name ASC") + jason, john := people[0], people[1] + + fmt.Printf("%#v\n%#v", jason, john) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + // Person{FirstName:"John", LastName:"Doe", Email:"johndoeDNE@gmail.net"} + + // You can also get a single result, a la QueryRow + jason = Person{} + err = db.Get(&jason, "SELECT * FROM person WHERE first_name=$1", "Jason") + fmt.Printf("%#v\n", jason) + // Person{FirstName:"Jason", LastName:"Moiron", Email:"jmoiron@jmoiron.net"} + + // if you have null fields and use SELECT *, you must use sql.Null* in your struct + places := []Place{} + err = db.Select(&places, "SELECT * FROM place ORDER BY telcode ASC") + if err != nil { + fmt.Println(err) + return + } + usa, singsing, honkers := places[0], places[1], places[2] + + fmt.Printf("%#v\n%#v\n%#v\n", usa, singsing, honkers) + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + + // Loop through rows using only one struct + place := Place{} + rows, err := db.Queryx("SELECT * FROM place") + for rows.Next() { + err := rows.StructScan(&place) + if err != nil { + log.Fatalln(err) + } + fmt.Printf("%#v\n", place) + } + // Place{Country:"United States", City:sql.NullString{String:"New York", Valid:true}, TelCode:1} + // Place{Country:"Hong Kong", City:sql.NullString{String:"", Valid:false}, TelCode:852} + // 
Place{Country:"Singapore", City:sql.NullString{String:"", Valid:false}, TelCode:65} + + // Named queries, using `:name` as the bindvar. Automatic bindvar support + // which takes into account the dbtype based on the driverName on sqlx.Open/Connect + _, err = db.NamedExec(`INSERT INTO person (first_name,last_name,email) VALUES (:first,:last,:email)`, + map[string]interface{}{ + "first": "Bin", + "last": "Smuth", + "email": "bensmith@allblacks.nz", + }) + + // Selects Mr. Smith from the database + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:fn`, map[string]interface{}{"fn": "Bin"}) + + // Named queries can also use structs. Their bind names follow the same rules + // as the name -> db mapping, so struct fields are lowercased and the `db` tag + // is taken into consideration. + rows, err = db.NamedQuery(`SELECT * FROM person WHERE first_name=:first_name`, jason) + + + // batch insert + + // batch insert with structs + personStructs := []Person{ + {FirstName: "Ardie", LastName: "Savea", Email: "asavea@ab.co.nz"}, + {FirstName: "Sonny Bill", LastName: "Williams", Email: "sbw@ab.co.nz"}, + {FirstName: "Ngani", LastName: "Laumape", Email: "nlaumape@ab.co.nz"}, + } + + _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`, personStructs) + + // batch insert with maps + personMaps := []map[string]interface{}{ + {"first_name": "Ardie", "last_name": "Savea", "email": "asavea@ab.co.nz"}, + {"first_name": "Sonny Bill", "last_name": "Williams", "email": "sbw@ab.co.nz"}, + {"first_name": "Ngani", "last_name": "Laumape", "email": "nlaumape@ab.co.nz"}, + } + + _, err = db.NamedExec(`INSERT INTO person (first_name, last_name, email) + VALUES (:first_name, :last_name, :email)`, personMaps) +} +``` diff --git a/vendor/github.com/jmoiron/sqlx/bind.go b/vendor/github.com/jmoiron/sqlx/bind.go new file mode 100644 index 0000000000..ec0da4e72e --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/bind.go @@ -0,0 +1,265 @@ +package sqlx + +import ( + "bytes" + "database/sql/driver" + "errors" + "reflect" + "strconv" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Bindvar types supported by Rebind, BindMap and BindStruct. +const ( + UNKNOWN = iota + QUESTION + DOLLAR + NAMED + AT +) + +var defaultBinds = map[int][]string{ + DOLLAR: []string{"postgres", "pgx", "pq-timeouts", "cloudsqlpostgres", "ql", "nrpostgres", "cockroach"}, + QUESTION: []string{"mysql", "sqlite3", "nrmysql", "nrsqlite3"}, + NAMED: []string{"oci8", "ora", "goracle", "godror"}, + AT: []string{"sqlserver"}, +} + +var binds sync.Map + +func init() { + for bind, drivers := range defaultBinds { + for _, driver := range drivers { + BindDriver(driver, bind) + } + } + +} + +// BindType returns the bindtype for a given database given a drivername. +func BindType(driverName string) int { + itype, ok := binds.Load(driverName) + if !ok { + return UNKNOWN + } + return itype.(int) +} + +// BindDriver sets the BindType for driverName to bindType. +func BindDriver(driverName string, bindType int) { + binds.Store(driverName, bindType) +} + +// FIXME: this should be able to be tolerant of escaped ?'s in queries without +// losing much speed, and should be to avoid confusion. + +// Rebind a query from the default bindtype (QUESTION) to the target bindtype. 
+func Rebind(bindType int, query string) string { + switch bindType { + case QUESTION, UNKNOWN: + return query + } + + // Add space enough for 10 params before we have to allocate + rqb := make([]byte, 0, len(query)+10) + + var i, j int + + for i = strings.Index(query, "?"); i != -1; i = strings.Index(query, "?") { + rqb = append(rqb, query[:i]...) + + switch bindType { + case DOLLAR: + rqb = append(rqb, '$') + case NAMED: + rqb = append(rqb, ':', 'a', 'r', 'g') + case AT: + rqb = append(rqb, '@', 'p') + } + + j++ + rqb = strconv.AppendInt(rqb, int64(j), 10) + + query = query[i+1:] + } + + return string(append(rqb, query...)) +} + +// Experimental implementation of Rebind which uses a bytes.Buffer. The code is +// much simpler and should be more resistant to odd unicode, but it is twice as +// slow. Kept here for benchmarking purposes and to possibly replace Rebind if +// problems arise with its somewhat naive handling of unicode. +func rebindBuff(bindType int, query string) string { + if bindType != DOLLAR { + return query + } + + b := make([]byte, 0, len(query)) + rqb := bytes.NewBuffer(b) + j := 1 + for _, r := range query { + if r == '?' { + rqb.WriteRune('$') + rqb.WriteString(strconv.Itoa(j)) + j++ + } else { + rqb.WriteRune(r) + } + } + + return rqb.String() +} + +func asSliceForIn(i interface{}) (v reflect.Value, ok bool) { + if i == nil { + return reflect.Value{}, false + } + + v = reflect.ValueOf(i) + t := reflectx.Deref(v.Type()) + + // Only expand slices + if t.Kind() != reflect.Slice { + return reflect.Value{}, false + } + + // []byte is a driver.Value type so it should not be expanded + if t == reflect.TypeOf([]byte{}) { + return reflect.Value{}, false + + } + + return v, true +} + +// In expands slice values in args, returning the modified query string +// and a new arg list that can be executed by a database. The `query` should +// use the `?` bindVar. The return value uses the `?` bindVar. +func In(query string, args ...interface{}) (string, []interface{}, error) { + // argMeta stores reflect.Value and length for slices and + // the value itself for non-slice arguments + type argMeta struct { + v reflect.Value + i interface{} + length int + } + + var flatArgsCount int + var anySlices bool + + var stackMeta [32]argMeta + + var meta []argMeta + if len(args) <= len(stackMeta) { + meta = stackMeta[:len(args)] + } else { + meta = make([]argMeta, len(args)) + } + + for i, arg := range args { + if a, ok := arg.(driver.Valuer); ok { + var err error + arg, err = a.Value() + if err != nil { + return "", nil, err + } + } + + if v, ok := asSliceForIn(arg); ok { + meta[i].length = v.Len() + meta[i].v = v + + anySlices = true + flatArgsCount += meta[i].length + + if meta[i].length == 0 { + return "", nil, errors.New("empty slice passed to 'in' query") + } + } else { + meta[i].i = arg + flatArgsCount++ + } + } + + // don't do any parsing if there aren't any slices; note that this means + // some errors that we might have caught below will not be returned. 
+ if !anySlices { + return query, args, nil + } + + newArgs := make([]interface{}, 0, flatArgsCount) + + var buf strings.Builder + buf.Grow(len(query) + len(", ?")*flatArgsCount) + + var arg, offset int + + for i := strings.IndexByte(query[offset:], '?'); i != -1; i = strings.IndexByte(query[offset:], '?') { + if arg >= len(meta) { + // if an argument wasn't passed, lets return an error; this is + // not actually how database/sql Exec/Query works, but since we are + // creating an argument list programmatically, we want to be able + // to catch these programmer errors earlier. + return "", nil, errors.New("number of bindVars exceeds arguments") + } + + argMeta := meta[arg] + arg++ + + // not a slice, continue. + // our questionmark will either be written before the next expansion + // of a slice or after the loop when writing the rest of the query + if argMeta.length == 0 { + offset = offset + i + 1 + newArgs = append(newArgs, argMeta.i) + continue + } + + // write everything up to and including our ? character + buf.WriteString(query[:offset+i+1]) + + for si := 1; si < argMeta.length; si++ { + buf.WriteString(", ?") + } + + newArgs = appendReflectSlice(newArgs, argMeta.v, argMeta.length) + + // slice the query and reset the offset. this avoids some bookkeeping for + // the write after the loop + query = query[offset+i+1:] + offset = 0 + } + + buf.WriteString(query) + + if arg < len(meta) { + return "", nil, errors.New("number of bindVars less than number arguments") + } + + return buf.String(), newArgs, nil +} + +func appendReflectSlice(args []interface{}, v reflect.Value, vlen int) []interface{} { + switch val := v.Interface().(type) { + case []interface{}: + args = append(args, val...) + case []int: + for i := range val { + args = append(args, val[i]) + } + case []string: + for i := range val { + args = append(args, val[i]) + } + default: + for si := 0; si < vlen; si++ { + args = append(args, v.Index(si).Interface()) + } + } + + return args +} diff --git a/vendor/github.com/jmoiron/sqlx/doc.go b/vendor/github.com/jmoiron/sqlx/doc.go new file mode 100644 index 0000000000..e2b4e60b2e --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/doc.go @@ -0,0 +1,12 @@ +// Package sqlx provides general purpose extensions to database/sql. +// +// It is intended to seamlessly wrap database/sql and provide convenience +// methods which are useful in the development of database driven applications. +// None of the underlying database/sql methods are changed. Instead all extended +// behavior is implemented through new methods defined on wrapper types. +// +// Additions include scanning into structs, named query support, rebinding +// queries for different drivers, convenient shorthands for common error handling +// and more. 
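`In` and `Rebind` are typically used together: `In` flattens slice arguments into the right number of `?` bindvars, and `Rebind` then converts the `?` style to the driver's. A small sketch (not part of the vendored files):

```go
package main

import (
	"fmt"
	"log"

	"github.com/jmoiron/sqlx"
)

func main() {
	// Expand the slice into one '?' per element.
	query, args, err := sqlx.In("SELECT * FROM users WHERE level IN (?);", []int{4, 6, 7})
	if err != nil {
		log.Fatalln(err)
	}

	// Convert '?' bindvars to the $N style used by postgres-like drivers.
	query = sqlx.Rebind(sqlx.DOLLAR, query)

	fmt.Println(query) // SELECT * FROM users WHERE level IN ($1, $2, $3);
	fmt.Println(args)  // [4 6 7]
}
```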
+// +package sqlx diff --git a/vendor/github.com/jmoiron/sqlx/named.go b/vendor/github.com/jmoiron/sqlx/named.go new file mode 100644 index 0000000000..728aa04d04 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named.go @@ -0,0 +1,458 @@ +package sqlx + +// Named Query Support +// +// * BindMap - bind query bindvars to map/struct args +// * NamedExec, NamedQuery - named query w/ struct or map +// * NamedStmt - a pre-compiled named query which is a prepared statement +// +// Internal Interfaces: +// +// * compileNamedQuery - rebind a named query, returning a query and list of names +// * bindArgs, bindMapArgs, bindAnyArgs - given a list of names, return an arglist +// +import ( + "bytes" + "database/sql" + "errors" + "fmt" + "reflect" + "regexp" + "strconv" + "unicode" + + "github.com/jmoiron/sqlx/reflectx" +) + +// NamedStmt is a prepared statement that executes named queries. Prepare it +// how you would execute a NamedQuery, but pass in a struct or map when executing. +type NamedStmt struct { + Params []string + QueryString string + Stmt *Stmt +} + +// Close closes the named statement. +func (n *NamedStmt) Close() error { + return n.Stmt.Close() +} + +// Exec executes a named statement using the struct passed. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Exec(arg interface{}) (sql.Result, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return *new(sql.Result), err + } + return n.Stmt.Exec(args...) +} + +// Query executes a named statement using the struct argument, returning rows. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Query(arg interface{}) (*sql.Rows, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return nil, err + } + return n.Stmt.Query(args...) +} + +// QueryRow executes a named statement against the database. Because sqlx cannot +// create a *sql.Row with an error condition pre-set for binding errors, sqlx +// returns a *sqlx.Row instead. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRow(arg interface{}) *Row { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return &Row{err: err} + } + return n.Stmt.QueryRowx(args...) +} + +// MustExec execs a NamedStmt, panicing on error +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) MustExec(arg interface{}) sql.Result { + res, err := n.Exec(arg) + if err != nil { + panic(err) + } + return res +} + +// Queryx using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Queryx(arg interface{}) (*Rows, error) { + r, err := n.Query(arg) + if err != nil { + return nil, err + } + return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err +} + +// QueryRowx this NamedStmt. Because of limitations with QueryRow, this is +// an alias for QueryRow. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryRowx(arg interface{}) *Row { + return n.QueryRow(arg) +} + +// Select using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. 
+func (n *NamedStmt) Select(dest interface{}, arg interface{}) error { + rows, err := n.Queryx(arg) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get using this NamedStmt +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) Get(dest interface{}, arg interface{}) error { + r := n.QueryRowx(arg) + return r.scanAny(dest, false) +} + +// Unsafe creates an unsafe version of the NamedStmt +func (n *NamedStmt) Unsafe() *NamedStmt { + r := &NamedStmt{Params: n.Params, Stmt: n.Stmt, QueryString: n.QueryString} + r.Stmt.unsafe = true + return r +} + +// A union interface of preparer and binder, required to be able to prepare +// named statements (as the bindtype must be determined). +type namedPreparer interface { + Preparer + binder +} + +func prepareNamed(p namedPreparer, query string) (*NamedStmt, error) { + bindType := BindType(p.DriverName()) + q, args, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return nil, err + } + stmt, err := Preparex(p, q) + if err != nil { + return nil, err + } + return &NamedStmt{ + QueryString: q, + Params: args, + Stmt: stmt, + }, nil +} + +// convertMapStringInterface attempts to convert v to map[string]interface{}. +// Unlike v.(map[string]interface{}), this function works on named types that +// are convertible to map[string]interface{} as well. +func convertMapStringInterface(v interface{}) (map[string]interface{}, bool) { + var m map[string]interface{} + mtype := reflect.TypeOf(m) + t := reflect.TypeOf(v) + if !t.ConvertibleTo(mtype) { + return nil, false + } + return reflect.ValueOf(v).Convert(mtype).Interface().(map[string]interface{}), true + +} + +func bindAnyArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + if maparg, ok := convertMapStringInterface(arg); ok { + return bindMapArgs(names, maparg) + } + return bindArgs(names, arg, m) +} + +// private interface to generate a list of interfaces from a given struct +// type, given a list of names to pull out of the struct. Used by public +// BindStruct interface. +func bindArgs(names []string, arg interface{}, m *reflectx.Mapper) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + // grab the indirected value of arg + v := reflect.ValueOf(arg) + for v = reflect.ValueOf(arg); v.Kind() == reflect.Ptr; { + v = v.Elem() + } + + err := m.TraversalsByNameFunc(v.Type(), names, func(i int, t []int) error { + if len(t) == 0 { + return fmt.Errorf("could not find name %s in %#v", names[i], arg) + } + + val := reflectx.FieldByIndexesReadOnly(v, t) + arglist = append(arglist, val.Interface()) + + return nil + }) + + return arglist, err +} + +// like bindArgs, but for maps. +func bindMapArgs(names []string, arg map[string]interface{}) ([]interface{}, error) { + arglist := make([]interface{}, 0, len(names)) + + for _, name := range names { + val, ok := arg[name] + if !ok { + return arglist, fmt.Errorf("could not find name %s in %#v", name, arg) + } + arglist = append(arglist, val) + } + return arglist, nil +} + +// bindStruct binds a named parameter query with fields from a struct argument. +// The rules for binding field names to parameter names follow the same +// conventions as for StructScan, including obeying the `db` struct tags. 
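+//
+// As a sketch of the observable behavior through the public API (the Person
+// struct and query here are assumptions for illustration):
+//
+//	type Person struct {
+//		Name string `db:"name"`
+//	}
+//	q, args, err := Named("SELECT * FROM person WHERE name=:name", Person{Name: "Jane"})
+//	// q is "SELECT * FROM person WHERE name=?" and args is []interface{}{"Jane"}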
+func bindStruct(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+	bound, names, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	arglist, err := bindAnyArgs(names, arg, m)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	return bound, arglist, nil
+}
+
+var valuesReg = regexp.MustCompile(`\)\s*(?i)VALUES\s*\(`)
+
+func findMatchingClosingBracketIndex(s string) int {
+	count := 0
+	for i, ch := range s {
+		if ch == '(' {
+			count++
+		}
+		if ch == ')' {
+			count--
+			if count == 0 {
+				return i
+			}
+		}
+	}
+	return 0
+}
+
+func fixBound(bound string, loop int) string {
+	loc := valuesReg.FindStringIndex(bound)
+	// defensive guard when "VALUES (...)" not found
+	if len(loc) < 2 {
+		return bound
+	}
+
+	openingBracketIndex := loc[1] - 1
+	index := findMatchingClosingBracketIndex(bound[openingBracketIndex:])
+	// defensive guard: must have a closing bracket
+	if index == 0 {
+		return bound
+	}
+	closingBracketIndex := openingBracketIndex + index + 1
+
+	var buffer bytes.Buffer
+
+	buffer.WriteString(bound[0:closingBracketIndex])
+	for i := 0; i < loop-1; i++ {
+		buffer.WriteString(",")
+		buffer.WriteString(bound[openingBracketIndex:closingBracketIndex])
+	}
+	buffer.WriteString(bound[closingBracketIndex:])
+	return buffer.String()
+}
+
+// bindArray binds a named parameter query with fields from an array or slice of
+// structs argument.
+func bindArray(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) {
+	// do the initial binding with QUESTION; if bindType is not question,
+	// we can rebind it at the end.
+	bound, names, err := compileNamedQuery([]byte(query), QUESTION)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+	arrayValue := reflect.ValueOf(arg)
+	arrayLen := arrayValue.Len()
+	if arrayLen == 0 {
+		return "", []interface{}{}, fmt.Errorf("length of array is 0: %#v", arg)
+	}
+	var arglist = make([]interface{}, 0, len(names)*arrayLen)
+	for i := 0; i < arrayLen; i++ {
+		elemArglist, err := bindAnyArgs(names, arrayValue.Index(i).Interface(), m)
+		if err != nil {
+			return "", []interface{}{}, err
+		}
+		arglist = append(arglist, elemArglist...)
+	}
+	if arrayLen > 1 {
+		bound = fixBound(bound, arrayLen)
+	}
+	// adjust binding type if we weren't on question
+	if bindType != QUESTION {
+		bound = Rebind(bindType, bound)
+	}
+	return bound, arglist, nil
+}
+
+// bindMap binds a named parameter query with a map of arguments.
+func bindMap(bindType int, query string, args map[string]interface{}) (string, []interface{}, error) {
+	bound, names, err := compileNamedQuery([]byte(query), bindType)
+	if err != nil {
+		return "", []interface{}{}, err
+	}
+
+	arglist, err := bindMapArgs(names, args)
+	return bound, arglist, err
+}
+
+// -- Compilation of Named Queries
+
+// Allow digits and letters in bind params; additionally runes are
+// checked against underscores, meaning that bind params can be
+// alphanumeric with underscores. Mind the difference between unicode
+// digits and numbers, where '5' is a digit but '五' is not.
+var allowedBindRunes = []*unicode.RangeTable{unicode.Letter, unicode.Digit}
+
+// FIXME: this function isn't safe for unicode named params, as a failing test
+// can testify. This is not a regression but a failure of the original code
+// as well. It should be modified to range over runes in a string rather than
+// bytes, even though this is less convenient and slower.
Hopefully the +// addition of the prepared NamedStmt (which will only do this once) will make +// up for the slightly slower ad-hoc NamedExec/NamedQuery. + +// compile a NamedQuery into an unbound query (using the '?' bindvar) and +// a list of names. +func compileNamedQuery(qs []byte, bindType int) (query string, names []string, err error) { + names = make([]string, 0, 10) + rebound := make([]byte, 0, len(qs)) + + inName := false + last := len(qs) - 1 + currentVar := 1 + name := make([]byte, 0, 10) + + for i, b := range qs { + // a ':' while we're in a name is an error + if b == ':' { + // if this is the second ':' in a '::' escape sequence, append a ':' + if inName && i > 0 && qs[i-1] == ':' { + rebound = append(rebound, ':') + inName = false + continue + } else if inName { + err = errors.New("unexpected `:` while reading named param at " + strconv.Itoa(i)) + return query, names, err + } + inName = true + name = []byte{} + } else if inName && i > 0 && b == '=' && len(name) == 0 { + rebound = append(rebound, ':', '=') + inName = false + continue + // if we're in a name, and this is an allowed character, continue + } else if inName && (unicode.IsOneOf(allowedBindRunes, rune(b)) || b == '_' || b == '.') && i != last { + // append the byte to the name if we are in a name and not on the last byte + name = append(name, b) + // if we're in a name and it's not an allowed character, the name is done + } else if inName { + inName = false + // if this is the final byte of the string and it is part of the name, then + // make sure to add it to the name + if i == last && unicode.IsOneOf(allowedBindRunes, rune(b)) { + name = append(name, b) + } + // add the string representation to the names list + names = append(names, string(name)) + // add a proper bindvar for the bindType + switch bindType { + // oracle only supports named type bind vars even for positional + case NAMED: + rebound = append(rebound, ':') + rebound = append(rebound, name...) + case QUESTION, UNKNOWN: + rebound = append(rebound, '?') + case DOLLAR: + rebound = append(rebound, '$') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + case AT: + rebound = append(rebound, '@', 'p') + for _, b := range strconv.Itoa(currentVar) { + rebound = append(rebound, byte(b)) + } + currentVar++ + } + // add this byte to string unless it was not part of the name + if i != last { + rebound = append(rebound, b) + } else if !unicode.IsOneOf(allowedBindRunes, rune(b)) { + rebound = append(rebound, b) + } + } else { + // this is a normal byte and should just go onto the rebound query + rebound = append(rebound, b) + } + } + + return string(rebound), names, err +} + +// BindNamed binds a struct or a map to a query with named parameters. +// DEPRECATED: use sqlx.Named` instead of this, it may be removed in future. +func BindNamed(bindType int, query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(bindType, query, arg, mapper()) +} + +// Named takes a query using named parameters and an argument and +// returns a new query with a list of args that can be executed by +// a database. The return value uses the `?` bindvar. 
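+//
+// A short sketch (the query and map contents are illustrative):
+//
+//	q, args, err := Named("INSERT INTO foo (a, b) VALUES (:a, :b)",
+//		map[string]interface{}{"a": 1, "b": 2})
+//	// q is "INSERT INTO foo (a, b) VALUES (?, ?)" and args is []interface{}{1, 2}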
+func Named(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(QUESTION, query, arg, mapper()) +} + +func bindNamedMapper(bindType int, query string, arg interface{}, m *reflectx.Mapper) (string, []interface{}, error) { + t := reflect.TypeOf(arg) + k := t.Kind() + switch { + case k == reflect.Map && t.Key().Kind() == reflect.String: + m, ok := convertMapStringInterface(arg) + if !ok { + return "", nil, fmt.Errorf("sqlx.bindNamedMapper: unsupported map type: %T", arg) + } + return bindMap(bindType, query, m) + case k == reflect.Array || k == reflect.Slice: + return bindArray(bindType, query, arg, m) + default: + return bindStruct(bindType, query, arg, m) + } +} + +// NamedQuery binds a named query and then runs Query on the result using the +// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with +// map[string]interface{} types. +func NamedQuery(e Ext, query string, arg interface{}) (*Rows, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.Queryx(q, args...) +} + +// NamedExec uses BindStruct to get a query executable by the driver and +// then runs Exec on the result. Returns an error from the binding +// or the query execution itself. +func NamedExec(e Ext, query string, arg interface{}) (sql.Result, error) { + q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e)) + if err != nil { + return nil, err + } + return e.Exec(q, args...) +} diff --git a/vendor/github.com/jmoiron/sqlx/named_context.go b/vendor/github.com/jmoiron/sqlx/named_context.go new file mode 100644 index 0000000000..07ad2165d0 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/named_context.go @@ -0,0 +1,132 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" +) + +// A union interface of contextPreparer and binder, required to be able to +// prepare named statements with context (as the bindtype must be determined). +type namedPreparerContext interface { + PreparerContext + binder +} + +func prepareNamedContext(ctx context.Context, p namedPreparerContext, query string) (*NamedStmt, error) { + bindType := BindType(p.DriverName()) + q, args, err := compileNamedQuery([]byte(query), bindType) + if err != nil { + return nil, err + } + stmt, err := PreparexContext(ctx, p, q) + if err != nil { + return nil, err + } + return &NamedStmt{ + QueryString: q, + Params: args, + Stmt: stmt, + }, nil +} + +// ExecContext executes a named statement using the struct passed. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) ExecContext(ctx context.Context, arg interface{}) (sql.Result, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return *new(sql.Result), err + } + return n.Stmt.ExecContext(ctx, args...) +} + +// QueryContext executes a named statement using the struct argument, returning rows. +// Any named placeholder parameters are replaced with fields from arg. +func (n *NamedStmt) QueryContext(ctx context.Context, arg interface{}) (*sql.Rows, error) { + args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper) + if err != nil { + return nil, err + } + return n.Stmt.QueryContext(ctx, args...) +} + +// QueryRowContext executes a named statement against the database. Because sqlx cannot +// create a *sql.Row with an error condition pre-set for binding errors, sqlx +// returns a *sqlx.Row instead. 
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowContext(ctx context.Context, arg interface{}) *Row {
+	args, err := bindAnyArgs(n.Params, arg, n.Stmt.Mapper)
+	if err != nil {
+		return &Row{err: err}
+	}
+	return n.Stmt.QueryRowxContext(ctx, args...)
+}
+
+// MustExecContext execs a NamedStmt, panicking on error.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) MustExecContext(ctx context.Context, arg interface{}) sql.Result {
+	res, err := n.ExecContext(ctx, arg)
+	if err != nil {
+		panic(err)
+	}
+	return res
+}
+
+// QueryxContext using this NamedStmt.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryxContext(ctx context.Context, arg interface{}) (*Rows, error) {
+	r, err := n.QueryContext(ctx, arg)
+	if err != nil {
+		return nil, err
+	}
+	return &Rows{Rows: r, Mapper: n.Stmt.Mapper, unsafe: isUnsafe(n)}, err
+}
+
+// QueryRowxContext this NamedStmt. Because of limitations with QueryRow, this is
+// an alias for QueryRow.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) QueryRowxContext(ctx context.Context, arg interface{}) *Row {
+	return n.QueryRowContext(ctx, arg)
+}
+
+// SelectContext using this NamedStmt.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) SelectContext(ctx context.Context, dest interface{}, arg interface{}) error {
+	rows, err := n.QueryxContext(ctx, arg)
+	if err != nil {
+		return err
+	}
+	// if something happens here, we want to make sure the rows are Closed
+	defer rows.Close()
+	return scanAll(rows, dest, false)
+}
+
+// GetContext using this NamedStmt.
+// Any named placeholder parameters are replaced with fields from arg.
+func (n *NamedStmt) GetContext(ctx context.Context, dest interface{}, arg interface{}) error {
+	r := n.QueryRowxContext(ctx, arg)
+	return r.scanAny(dest, false)
+}
+
+// NamedQueryContext binds a named query and then runs Query on the result using the
+// provided Ext (sqlx.Tx, sqlx.Db). It works with both structs and with
+// map[string]interface{} types.
+func NamedQueryContext(ctx context.Context, e ExtContext, query string, arg interface{}) (*Rows, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.QueryxContext(ctx, q, args...)
+}
+
+// NamedExecContext uses BindStruct to get a query executable by the driver and
+// then runs Exec on the result. Returns an error from the binding
+// or the query execution itself.
+func NamedExecContext(ctx context.Context, e ExtContext, query string, arg interface{}) (sql.Result, error) {
+	q, args, err := bindNamedMapper(BindType(e.DriverName()), query, arg, mapperFor(e))
+	if err != nil {
+		return nil, err
+	}
+	return e.ExecContext(ctx, q, args...)
+}
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/README.md b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
new file mode 100644
index 0000000000..f01d3d1f08
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/README.md
@@ -0,0 +1,17 @@
+# reflectx
+
+The sqlx package has special reflect needs.
In particular, it needs to:
+
+* be able to map a name to a field
+* understand embedded structs
+* understand mapping names to fields by a particular tag
+* support user-specified name -> field mapping functions
+
+These behaviors mimic the behaviors of the standard library marshallers and also the
+behavior of standard Go accessors.
+
+The first two are amply taken care of by `reflect.Value.FieldByName`, and the third is
+addressed by `reflect.Value.FieldByNameFunc`, but these don't quite understand struct
+tags in the ways that are vital to most marshallers, and they are slow.
+
+This reflectx package extends reflect to achieve these goals.
diff --git a/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
new file mode 100644
index 0000000000..0b1099428d
--- /dev/null
+++ b/vendor/github.com/jmoiron/sqlx/reflectx/reflect.go
@@ -0,0 +1,444 @@
+// Package reflectx implements extensions to the standard reflect lib suitable
+// for implementing marshalling and unmarshalling packages. The main Mapper type
+// allows for Go-compatible named attribute access, including accessing embedded
+// struct attributes and the ability to use functions and struct tags to
+// customize field names.
+//
+package reflectx
+
+import (
+	"reflect"
+	"runtime"
+	"strings"
+	"sync"
+)
+
+// A FieldInfo is metadata for a struct field.
+type FieldInfo struct {
+	Index    []int
+	Path     string
+	Field    reflect.StructField
+	Zero     reflect.Value
+	Name     string
+	Options  map[string]string
+	Embedded bool
+	Children []*FieldInfo
+	Parent   *FieldInfo
+}
+
+// A StructMap is an index of field metadata for a struct.
+type StructMap struct {
+	Tree  *FieldInfo
+	Index []*FieldInfo
+	Paths map[string]*FieldInfo
+	Names map[string]*FieldInfo
+}
+
+// GetByPath returns a *FieldInfo for a given string path.
+func (f StructMap) GetByPath(path string) *FieldInfo {
+	return f.Paths[path]
+}
+
+// GetByTraversal returns a *FieldInfo for a given integer path. It is
+// analogous to reflect.FieldByIndex, but using the cached traversal
+// rather than re-executing the reflect machinery each time.
+func (f StructMap) GetByTraversal(index []int) *FieldInfo {
+	if len(index) == 0 {
+		return nil
+	}
+
+	tree := f.Tree
+	for _, i := range index {
+		if i >= len(tree.Children) || tree.Children[i] == nil {
+			return nil
+		}
+		tree = tree.Children[i]
+	}
+	return tree
+}
+
+// Mapper is a general purpose mapper of names to struct fields. A Mapper
+// behaves like most marshallers in the standard library, obeying a field tag
+// for name mapping but also providing a basic transform function.
+type Mapper struct {
+	cache      map[reflect.Type]*StructMap
+	tagName    string
+	tagMapFunc func(string) string
+	mapFunc    func(string) string
+	mutex      sync.Mutex
+}
+
+// NewMapper returns a new mapper using the tagName as its struct field tag.
+// If tagName is the empty string, it is ignored.
+func NewMapper(tagName string) *Mapper {
+	return &Mapper{
+		cache:   make(map[reflect.Type]*StructMap),
+		tagName: tagName,
+	}
+}
+
+// NewMapperTagFunc returns a new mapper which contains a mapper for field names
+// AND a mapper for tag values. This is useful for tags like json which can
+// have values like "name,omitempty".
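+//
+// A hypothetical sketch: normalize both Go field names and tag values to
+// lower case before they are used as lookup keys:
+//
+//	m := NewMapperTagFunc("json", strings.ToLower, strings.ToLower)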
+func NewMapperTagFunc(tagName string, mapFunc, tagMapFunc func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: mapFunc, + tagMapFunc: tagMapFunc, + } +} + +// NewMapperFunc returns a new mapper which optionally obeys a field tag and +// a struct field name mapper func given by f. Tags will take precedence, but +// for any other field, the mapped name will be f(field.Name) +func NewMapperFunc(tagName string, f func(string) string) *Mapper { + return &Mapper{ + cache: make(map[reflect.Type]*StructMap), + tagName: tagName, + mapFunc: f, + } +} + +// TypeMap returns a mapping of field strings to int slices representing +// the traversal down the struct to reach the field. +func (m *Mapper) TypeMap(t reflect.Type) *StructMap { + m.mutex.Lock() + mapping, ok := m.cache[t] + if !ok { + mapping = getMapping(t, m.tagName, m.mapFunc, m.tagMapFunc) + m.cache[t] = mapping + } + m.mutex.Unlock() + return mapping +} + +// FieldMap returns the mapper's mapping of field names to reflect values. Panics +// if v's Kind is not Struct, or v is not Indirectable to a struct kind. +func (m *Mapper) FieldMap(v reflect.Value) map[string]reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + r := map[string]reflect.Value{} + tm := m.TypeMap(v.Type()) + for tagName, fi := range tm.Names { + r[tagName] = FieldByIndexes(v, fi.Index) + } + return r +} + +// FieldByName returns a field by its mapped name as a reflect.Value. +// Panics if v's Kind is not Struct or v is not Indirectable to a struct Kind. +// Returns zero Value if the name is not found. +func (m *Mapper) FieldByName(v reflect.Value, name string) reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + fi, ok := tm.Names[name] + if !ok { + return v + } + return FieldByIndexes(v, fi.Index) +} + +// FieldsByName returns a slice of values corresponding to the slice of names +// for the value. Panics if v's Kind is not Struct or v is not Indirectable +// to a struct Kind. Returns zero Value for each name not found. +func (m *Mapper) FieldsByName(v reflect.Value, names []string) []reflect.Value { + v = reflect.Indirect(v) + mustBe(v, reflect.Struct) + + tm := m.TypeMap(v.Type()) + vals := make([]reflect.Value, 0, len(names)) + for _, name := range names { + fi, ok := tm.Names[name] + if !ok { + vals = append(vals, *new(reflect.Value)) + } else { + vals = append(vals, FieldByIndexes(v, fi.Index)) + } + } + return vals +} + +// TraversalsByName returns a slice of int slices which represent the struct +// traversals for each mapped name. Panics if t is not a struct or Indirectable +// to a struct. Returns empty int slice for each name not found. +func (m *Mapper) TraversalsByName(t reflect.Type, names []string) [][]int { + r := make([][]int, 0, len(names)) + m.TraversalsByNameFunc(t, names, func(_ int, i []int) error { + if i == nil { + r = append(r, []int{}) + } else { + r = append(r, i) + } + + return nil + }) + return r +} + +// TraversalsByNameFunc traverses the mapped names and calls fn with the index of +// each name and the struct traversal represented by that name. Panics if t is not +// a struct or Indirectable to a struct. Returns the first error returned by fn or nil. 
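+//
+// For example, a caller could fail fast on any name that has no matching
+// field (a sketch; t and names are assumptions):
+//
+//	err := m.TraversalsByNameFunc(t, names, func(i int, trav []int) error {
+//		if len(trav) == 0 {
+//			return fmt.Errorf("no field found for %q", names[i])
+//		}
+//		return nil
+//	})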
+func (m *Mapper) TraversalsByNameFunc(t reflect.Type, names []string, fn func(int, []int) error) error {
+	t = Deref(t)
+	mustBe(t, reflect.Struct)
+	tm := m.TypeMap(t)
+	for i, name := range names {
+		fi, ok := tm.Names[name]
+		if !ok {
+			if err := fn(i, nil); err != nil {
+				return err
+			}
+		} else {
+			if err := fn(i, fi.Index); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
+
+// FieldByIndexes returns a value for the field given by the struct traversal
+// for the given value.
+func FieldByIndexes(v reflect.Value, indexes []int) reflect.Value {
+	for _, i := range indexes {
+		v = reflect.Indirect(v).Field(i)
+		// if this is a pointer and it's nil, allocate a new value and set it
+		if v.Kind() == reflect.Ptr && v.IsNil() {
+			alloc := reflect.New(Deref(v.Type()))
+			v.Set(alloc)
+		}
+		if v.Kind() == reflect.Map && v.IsNil() {
+			v.Set(reflect.MakeMap(v.Type()))
+		}
+	}
+	return v
+}
+
+// FieldByIndexesReadOnly returns a value for a particular struct traversal,
+// but is not concerned with allocating nil pointers because the value is
+// going to be used for reading and not setting.
+func FieldByIndexesReadOnly(v reflect.Value, indexes []int) reflect.Value {
+	for _, i := range indexes {
+		v = reflect.Indirect(v).Field(i)
+	}
+	return v
+}
+
+// Deref is Indirect for reflect.Types
+func Deref(t reflect.Type) reflect.Type {
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	return t
+}
+
+// -- helpers & utilities --
+
+type kinder interface {
+	Kind() reflect.Kind
+}
+
+// mustBe checks a value against a kind, panicking with a reflect.ValueError
+// if the kind isn't that which is required.
+func mustBe(v kinder, expected reflect.Kind) {
+	if k := v.Kind(); k != expected {
+		panic(&reflect.ValueError{Method: methodName(), Kind: k})
+	}
+}
+
+// methodName returns the caller of the function calling methodName
+func methodName() string {
+	pc, _, _, _ := runtime.Caller(2)
+	f := runtime.FuncForPC(pc)
+	if f == nil {
+		return "unknown method"
+	}
+	return f.Name()
+}
+
+type typeQueue struct {
+	t  reflect.Type
+	fi *FieldInfo
+	pp string // Parent path
+}
+
+// A copying append that creates a new slice each time.
+func apnd(is []int, i int) []int {
+	x := make([]int, len(is)+1)
+	copy(x, is)
+	x[len(x)-1] = i
+	return x
+}
+
+type mapf func(string) string
+
+// parseName parses the tag and the target name for the given field using
+// the tagName (eg 'json' for `json:"foo"` tags), mapFunc for mapping the
+// field's name to a target name, and tagMapFunc for mapping the tag to
+// a target name.
+func parseName(field reflect.StructField, tagName string, mapFunc, tagMapFunc mapf) (tag, fieldName string) {
+	// first, set the fieldName to the field's name
+	fieldName = field.Name
+	// if a mapFunc is set, use that to override the fieldName
+	if mapFunc != nil {
+		fieldName = mapFunc(fieldName)
+	}
+
+	// if there's no tag to look for, return the field name
+	if tagName == "" {
+		return "", fieldName
+	}
+
+	// if this tag is not set using the normal convention in the tag,
+	// then return the field name. This check is done because, according
+	// to the reflect documentation:
+	//    If the tag does not have the conventional format,
+	//    the value returned by Get is unspecified.
+	// which doesn't sound great.
+ if !strings.Contains(string(field.Tag), tagName+":") { + return "", fieldName + } + + // at this point we're fairly sure that we have a tag, so lets pull it out + tag = field.Tag.Get(tagName) + + // if we have a mapper function, call it on the whole tag + // XXX: this is a change from the old version, which pulled out the name + // before the tagMapFunc could be run, but I think this is the right way + if tagMapFunc != nil { + tag = tagMapFunc(tag) + } + + // finally, split the options from the name + parts := strings.Split(tag, ",") + fieldName = parts[0] + + return tag, fieldName +} + +// parseOptions parses options out of a tag string, skipping the name +func parseOptions(tag string) map[string]string { + parts := strings.Split(tag, ",") + options := make(map[string]string, len(parts)) + if len(parts) > 1 { + for _, opt := range parts[1:] { + // short circuit potentially expensive split op + if strings.Contains(opt, "=") { + kv := strings.Split(opt, "=") + options[kv[0]] = kv[1] + continue + } + options[opt] = "" + } + } + return options +} + +// getMapping returns a mapping for the t type, using the tagName, mapFunc and +// tagMapFunc to determine the canonical names of fields. +func getMapping(t reflect.Type, tagName string, mapFunc, tagMapFunc mapf) *StructMap { + m := []*FieldInfo{} + + root := &FieldInfo{} + queue := []typeQueue{} + queue = append(queue, typeQueue{Deref(t), root, ""}) + +QueueLoop: + for len(queue) != 0 { + // pop the first item off of the queue + tq := queue[0] + queue = queue[1:] + + // ignore recursive field + for p := tq.fi.Parent; p != nil; p = p.Parent { + if tq.fi.Field.Type == p.Field.Type { + continue QueueLoop + } + } + + nChildren := 0 + if tq.t.Kind() == reflect.Struct { + nChildren = tq.t.NumField() + } + tq.fi.Children = make([]*FieldInfo, nChildren) + + // iterate through all of its fields + for fieldPos := 0; fieldPos < nChildren; fieldPos++ { + + f := tq.t.Field(fieldPos) + + // parse the tag and the target name using the mapping options for this field + tag, name := parseName(f, tagName, mapFunc, tagMapFunc) + + // if the name is "-", disabled via a tag, skip it + if name == "-" { + continue + } + + fi := FieldInfo{ + Field: f, + Name: name, + Zero: reflect.New(f.Type).Elem(), + Options: parseOptions(tag), + } + + // if the path is empty this path is just the name + if tq.pp == "" { + fi.Path = fi.Name + } else { + fi.Path = tq.pp + "." 
+ fi.Name + } + + // skip unexported fields + if len(f.PkgPath) != 0 && !f.Anonymous { + continue + } + + // bfs search of anonymous embedded structs + if f.Anonymous { + pp := tq.pp + if tag != "" { + pp = fi.Path + } + + fi.Embedded = true + fi.Index = apnd(tq.fi.Index, fieldPos) + nChildren := 0 + ft := Deref(f.Type) + if ft.Kind() == reflect.Struct { + nChildren = ft.NumField() + } + fi.Children = make([]*FieldInfo, nChildren) + queue = append(queue, typeQueue{Deref(f.Type), &fi, pp}) + } else if fi.Zero.Kind() == reflect.Struct || (fi.Zero.Kind() == reflect.Ptr && fi.Zero.Type().Elem().Kind() == reflect.Struct) { + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Children = make([]*FieldInfo, Deref(f.Type).NumField()) + queue = append(queue, typeQueue{Deref(f.Type), &fi, fi.Path}) + } + + fi.Index = apnd(tq.fi.Index, fieldPos) + fi.Parent = tq.fi + tq.fi.Children[fieldPos] = &fi + m = append(m, &fi) + } + } + + flds := &StructMap{Index: m, Tree: root, Paths: map[string]*FieldInfo{}, Names: map[string]*FieldInfo{}} + for _, fi := range flds.Index { + // check if nothing has already been pushed with the same path + // sometimes you can choose to override a type using embedded struct + fld, ok := flds.Paths[fi.Path] + if !ok || fld.Embedded { + flds.Paths[fi.Path] = fi + if fi.Name != "" && !fi.Embedded { + flds.Names[fi.Path] = fi + } + } + } + + return flds +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx.go b/vendor/github.com/jmoiron/sqlx/sqlx.go new file mode 100644 index 0000000000..112ef70e9c --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx.go @@ -0,0 +1,1049 @@ +package sqlx + +import ( + "database/sql" + "database/sql/driver" + "errors" + "fmt" + + "io/ioutil" + "path/filepath" + "reflect" + "strings" + "sync" + + "github.com/jmoiron/sqlx/reflectx" +) + +// Although the NameMapper is convenient, in practice it should not +// be relied on except for application code. If you are writing a library +// that uses sqlx, you should be aware that the name mappings you expect +// can be overridden by your user's application. + +// NameMapper is used to map column names to struct field names. By default, +// it uses strings.ToLower to lowercase struct field names. It can be set +// to whatever you want, but it is encouraged to be set before sqlx is used +// as name-to-field mappings are cached after first use on a type. +var NameMapper = strings.ToLower +var origMapper = reflect.ValueOf(NameMapper) + +// Rather than creating on init, this is created when necessary so that +// importers have time to customize the NameMapper. +var mpr *reflectx.Mapper + +// mprMu protects mpr. +var mprMu sync.Mutex + +// mapper returns a valid mapper using the configured NameMapper func. +func mapper() *reflectx.Mapper { + mprMu.Lock() + defer mprMu.Unlock() + + if mpr == nil { + mpr = reflectx.NewMapperFunc("db", NameMapper) + } else if origMapper != reflect.ValueOf(NameMapper) { + // if NameMapper has changed, create a new mapper + mpr = reflectx.NewMapperFunc("db", NameMapper) + origMapper = reflect.ValueOf(NameMapper) + } + return mpr +} + +// isScannable takes the reflect.Type and the actual dest value and returns +// whether or not it's Scannable. 
Something is scannable if: +// * it is not a struct +// * it implements sql.Scanner +// * it has no exported fields +func isScannable(t reflect.Type) bool { + if reflect.PtrTo(t).Implements(_scannerInterface) { + return true + } + if t.Kind() != reflect.Struct { + return true + } + + // it's not important that we use the right mapper for this particular object, + // we're only concerned on how many exported fields this struct has + return len(mapper().TypeMap(t).Index) == 0 +} + +// ColScanner is an interface used by MapScan and SliceScan +type ColScanner interface { + Columns() ([]string, error) + Scan(dest ...interface{}) error + Err() error +} + +// Queryer is an interface used by Get and Select +type Queryer interface { + Query(query string, args ...interface{}) (*sql.Rows, error) + Queryx(query string, args ...interface{}) (*Rows, error) + QueryRowx(query string, args ...interface{}) *Row +} + +// Execer is an interface used by MustExec and LoadFile +type Execer interface { + Exec(query string, args ...interface{}) (sql.Result, error) +} + +// Binder is an interface for something which can bind queries (Tx, DB) +type binder interface { + DriverName() string + Rebind(string) string + BindNamed(string, interface{}) (string, []interface{}, error) +} + +// Ext is a union interface which can bind, query, and exec, used by +// NamedQuery and NamedExec. +type Ext interface { + binder + Queryer + Execer +} + +// Preparer is an interface used by Preparex. +type Preparer interface { + Prepare(query string) (*sql.Stmt, error) +} + +// determine if any of our extensions are unsafe +func isUnsafe(i interface{}) bool { + switch v := i.(type) { + case Row: + return v.unsafe + case *Row: + return v.unsafe + case Rows: + return v.unsafe + case *Rows: + return v.unsafe + case NamedStmt: + return v.Stmt.unsafe + case *NamedStmt: + return v.Stmt.unsafe + case Stmt: + return v.unsafe + case *Stmt: + return v.unsafe + case qStmt: + return v.unsafe + case *qStmt: + return v.unsafe + case DB: + return v.unsafe + case *DB: + return v.unsafe + case Tx: + return v.unsafe + case *Tx: + return v.unsafe + case sql.Rows, *sql.Rows: + return false + default: + return false + } +} + +func mapperFor(i interface{}) *reflectx.Mapper { + switch i := i.(type) { + case DB: + return i.Mapper + case *DB: + return i.Mapper + case Tx: + return i.Mapper + case *Tx: + return i.Mapper + default: + return mapper() + } +} + +var _scannerInterface = reflect.TypeOf((*sql.Scanner)(nil)).Elem() +var _valuerInterface = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// Row is a reimplementation of sql.Row in order to gain access to the underlying +// sql.Rows.Columns() data, necessary for StructScan. +type Row struct { + err error + unsafe bool + rows *sql.Rows + Mapper *reflectx.Mapper +} + +// Scan is a fixed implementation of sql.Row.Scan, which does not discard the +// underlying error from the internal rows object if it exists. +func (r *Row) Scan(dest ...interface{}) error { + if r.err != nil { + return r.err + } + + // TODO(bradfitz): for now we need to defensively clone all + // []byte that the driver returned (not permitting + // *RawBytes in Rows.Scan), since we're about to close + // the Rows in our defer, when we return from this function. + // the contract with the driver.Next(...) interface is that it + // can return slices into read-only temporary memory that's + // only valid until the next Scan/Close. But the TODO is that + // for a lot of drivers, this copy will be unnecessary. 
We + // should provide an optional interface for drivers to + // implement to say, "don't worry, the []bytes that I return + // from Next will not be modified again." (for instance, if + // they were obtained from the network anyway) But for now we + // don't care. + defer r.rows.Close() + for _, dp := range dest { + if _, ok := dp.(*sql.RawBytes); ok { + return errors.New("sql: RawBytes isn't allowed on Row.Scan") + } + } + + if !r.rows.Next() { + if err := r.rows.Err(); err != nil { + return err + } + return sql.ErrNoRows + } + err := r.rows.Scan(dest...) + if err != nil { + return err + } + // Make sure the query can be processed to completion with no errors. + if err := r.rows.Close(); err != nil { + return err + } + return nil +} + +// Columns returns the underlying sql.Rows.Columns(), or the deferred error usually +// returned by Row.Scan() +func (r *Row) Columns() ([]string, error) { + if r.err != nil { + return []string{}, r.err + } + return r.rows.Columns() +} + +// ColumnTypes returns the underlying sql.Rows.ColumnTypes(), or the deferred error +func (r *Row) ColumnTypes() ([]*sql.ColumnType, error) { + if r.err != nil { + return []*sql.ColumnType{}, r.err + } + return r.rows.ColumnTypes() +} + +// Err returns the error encountered while scanning. +func (r *Row) Err() error { + return r.err +} + +// DB is a wrapper around sql.DB which keeps track of the driverName upon Open, +// used mostly to automatically bind named queries using the right bindvars. +type DB struct { + *sql.DB + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// NewDb returns a new sqlx DB wrapper for a pre-existing *sql.DB. The +// driverName of the original database is required for named query support. +func NewDb(db *sql.DB, driverName string) *DB { + return &DB{DB: db, driverName: driverName, Mapper: mapper()} +} + +// DriverName returns the driverName passed to the Open function for this DB. +func (db *DB) DriverName() string { + return db.driverName +} + +// Open is the same as sql.Open, but returns an *sqlx.DB instead. +func Open(driverName, dataSourceName string) (*DB, error) { + db, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return &DB{DB: db, driverName: driverName, Mapper: mapper()}, err +} + +// MustOpen is the same as sql.Open, but returns an *sqlx.DB instead and panics on error. +func MustOpen(driverName, dataSourceName string) *DB { + db, err := Open(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// MapperFunc sets a new mapper for this db using the default sqlx struct tag +// and the provided mapper function. +func (db *DB) MapperFunc(mf func(string) string) { + db.Mapper = reflectx.NewMapperFunc("db", mf) +} + +// Rebind transforms a query from QUESTION to the DB driver's bindvar type. +func (db *DB) Rebind(query string) string { + return Rebind(BindType(db.driverName), query) +} + +// Unsafe returns a version of DB which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +// sqlx.Stmt and sqlx.Tx which are created from this DB will inherit its +// safety behavior. +func (db *DB) Unsafe() *DB { + return &DB{DB: db.DB, driverName: db.driverName, unsafe: true, Mapper: db.Mapper} +} + +// BindNamed binds a query using the DB driver's bindvar type. +func (db *DB) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(db.driverName), query, arg, db.Mapper) +} + +// NamedQuery using this DB. 
+// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(db, query, arg) +} + +// NamedExec using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(db, query, arg) +} + +// Select using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Select(dest interface{}, query string, args ...interface{}) error { + return Select(db, dest, query, args...) +} + +// Get using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (db *DB) Get(dest interface{}, query string, args ...interface{}) error { + return Get(db, dest, query, args...) +} + +// MustBegin starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +func (db *DB) MustBegin() *Tx { + tx, err := db.Beginx() + if err != nil { + panic(err) + } + return tx +} + +// Beginx begins a transaction and returns an *sqlx.Tx instead of an *sql.Tx. +func (db *DB) Beginx() (*Tx, error) { + tx, err := db.DB.Begin() + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Queryx queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowx queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowx(query string, args ...interface{}) *Row { + rows, err := db.DB.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustExec (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(db, query, args...) +} + +// Preparex returns an sqlx.Stmt instead of a sql.Stmt +func (db *DB) Preparex(query string) (*Stmt, error) { + return Preparex(db, query) +} + +// PrepareNamed returns an sqlx.NamedStmt +func (db *DB) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(db, query) +} + +// Conn is a wrapper around sql.Conn with extra functionality +type Conn struct { + *sql.Conn + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// Tx is an sqlx wrapper around sql.Tx with extra functionality +type Tx struct { + *sql.Tx + driverName string + unsafe bool + Mapper *reflectx.Mapper +} + +// DriverName returns the driverName used by the DB which began this transaction. +func (tx *Tx) DriverName() string { + return tx.driverName +} + +// Rebind a query within a transaction's bindvar type. +func (tx *Tx) Rebind(query string) string { + return Rebind(BindType(tx.driverName), query) +} + +// Unsafe returns a version of Tx which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (tx *Tx) Unsafe() *Tx { + return &Tx{Tx: tx.Tx, driverName: tx.driverName, unsafe: true, Mapper: tx.Mapper} +} + +// BindNamed binds a query within a transaction's bindvar type. 
+func (tx *Tx) BindNamed(query string, arg interface{}) (string, []interface{}, error) { + return bindNamedMapper(BindType(tx.driverName), query, arg, tx.Mapper) +} + +// NamedQuery within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedQuery(query string, arg interface{}) (*Rows, error) { + return NamedQuery(tx, query, arg) +} + +// NamedExec a named query within a transaction. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExec(query string, arg interface{}) (sql.Result, error) { + return NamedExec(tx, query, arg) +} + +// Select within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Select(dest interface{}, query string, args ...interface{}) error { + return Select(tx, dest, query, args...) +} + +// Queryx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.Query(query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// QueryRowx within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowx(query string, args ...interface{}) *Row { + rows, err := tx.Tx.Query(query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// Get within a transaction. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) Get(dest interface{}, query string, args ...interface{}) error { + return Get(tx, dest, query, args...) +} + +// MustExec runs MustExec within a transaction. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) MustExec(query string, args ...interface{}) sql.Result { + return MustExec(tx, query, args...) +} + +// Preparex a statement within a transaction. +func (tx *Tx) Preparex(query string) (*Stmt, error) { + return Preparex(tx, query) +} + +// Stmtx returns a version of the prepared statement which runs within a transaction. Provided +// stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) Stmtx(stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.Stmt(s), Mapper: tx.Mapper} +} + +// NamedStmt returns a version of the prepared statement which runs within a transaction. +func (tx *Tx) NamedStmt(stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.Stmtx(stmt.Stmt), + } +} + +// PrepareNamed returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamed(query string) (*NamedStmt, error) { + return prepareNamed(tx, query) +} + +// Stmt is an sqlx wrapper around sql.Stmt with extra functionality +type Stmt struct { + *sql.Stmt + unsafe bool + Mapper *reflectx.Mapper +} + +// Unsafe returns a version of Stmt which will silently succeed to scan when +// columns in the SQL result have no fields in the destination struct. +func (s *Stmt) Unsafe() *Stmt { + return &Stmt{Stmt: s.Stmt, unsafe: true, Mapper: s.Mapper} +} + +// Select using the prepared statement. +// Any placeholder parameters are replaced with supplied args. 
+func (s *Stmt) Select(dest interface{}, args ...interface{}) error { + return Select(&qStmt{s}, dest, "", args...) +} + +// Get using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) Get(dest interface{}, args ...interface{}) error { + return Get(&qStmt{s}, dest, "", args...) +} + +// MustExec (panic) using this statement. Note that the query portion of the error +// output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExec(args ...interface{}) sql.Result { + return MustExec(&qStmt{s}, "", args...) +} + +// QueryRowx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowx(args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowx("", args...) +} + +// Queryx using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) Queryx(args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.Queryx("", args...) +} + +// qStmt is an unexposed wrapper which lets you use a Stmt as a Queryer & Execer by +// implementing those interfaces and ignoring the `query` argument. +type qStmt struct{ *Stmt } + +func (q *qStmt) Query(query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.Query(args...) +} + +func (q *qStmt) Queryx(query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.Query(args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowx(query string, args ...interface{}) *Row { + rows, err := q.Stmt.Query(args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) Exec(query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.Exec(args...) +} + +// Rows is a wrapper around sql.Rows which caches costly reflect operations +// during a looped StructScan +type Rows struct { + *sql.Rows + unsafe bool + Mapper *reflectx.Mapper + // these fields cache memory use for a rows during iteration w/ structScan + started bool + fields [][]int + values []interface{} +} + +// SliceScan using this Rows. +func (r *Rows) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Rows) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +// StructScan is like sql.Rows.Scan, but scans a single Row into a single Struct. +// Use this and iterate over Rows manually when the memory load of Select() might be +// prohibitive. *Rows.StructScan caches the reflect work of matching up column +// positions to fields to avoid that overhead per scan, which means it is not safe +// to run StructScan on the same Rows instance with different struct types. 
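+//
+// The intended loop looks roughly like this (the query and Place struct are
+// illustrative assumptions):
+//
+//	rows, err := db.Queryx("SELECT * FROM place")
+//	for rows.Next() {
+//		var p Place
+//		if err := rows.StructScan(&p); err != nil {
+//			// handle the scan error
+//		}
+//	}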
+func (r *Rows) StructScan(dest interface{}) error { + v := reflect.ValueOf(dest) + + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + + v = v.Elem() + + if !r.started { + columns, err := r.Columns() + if err != nil { + return err + } + m := r.Mapper + + r.fields = m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(r.fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + r.values = make([]interface{}, len(columns)) + r.started = true + } + + err := fieldsByTraversal(v, r.fields, r.values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + err = r.Scan(r.values...) + if err != nil { + return err + } + return r.Err() +} + +// Connect to a database and verify with a ping. +func Connect(driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + err = db.Ping() + if err != nil { + db.Close() + return nil, err + } + return db, nil +} + +// MustConnect connects to a database and panics on error. +func MustConnect(driverName, dataSourceName string) *DB { + db, err := Connect(driverName, dataSourceName) + if err != nil { + panic(err) + } + return db +} + +// Preparex prepares a statement. +func Preparex(p Preparer, query string) (*Stmt, error) { + s, err := p.Prepare(query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// Select executes a query using the provided Queryer, and StructScans each row +// into dest, which must be a slice. If the slice elements are scannable, then +// the result set must have only one column. Otherwise, StructScan is used. +// The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func Select(q Queryer, dest interface{}, query string, args ...interface{}) error { + rows, err := q.Queryx(query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// Get does a QueryRow using the provided Queryer, and scans the resulting row +// to dest. If dest is scannable, the result must only have one column. Otherwise, +// StructScan is used. Get will return sql.ErrNoRows like row.Scan would. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func Get(q Queryer, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowx(query, args...) + return r.scanAny(dest, false) +} + +// LoadFile exec's every statement in a file (as a single call to Exec). +// LoadFile may return a nil *sql.Result if errors are encountered locating or +// reading the file at path. LoadFile reads the entire file into memory, so it +// is not suitable for loading large data dumps, but can be useful for initializing +// schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. 
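+//
+// A minimal sketch (the file path is an assumption):
+//
+//	if _, err := LoadFile(db, "./schema.sql"); err != nil {
+//		// handle the error
+//	}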
+func LoadFile(e Execer, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.Exec(string(contents)) + return &res, err +} + +// MustExec execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExec(e Execer, query string, args ...interface{}) sql.Result { + res, err := e.Exec(query, args...) + if err != nil { + panic(err) + } + return res +} + +// SliceScan using this Rows. +func (r *Row) SliceScan() ([]interface{}, error) { + return SliceScan(r) +} + +// MapScan using this Rows. +func (r *Row) MapScan(dest map[string]interface{}) error { + return MapScan(r, dest) +} + +func (r *Row) scanAny(dest interface{}, structOnly bool) error { + if r.err != nil { + return r.err + } + if r.rows == nil { + r.err = sql.ErrNoRows + return r.err + } + defer r.rows.Close() + + v := reflect.ValueOf(dest) + if v.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if v.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + + base := reflectx.Deref(v.Type()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := r.Columns() + if err != nil { + return err + } + + if scannable && len(columns) > 1 { + return fmt.Errorf("scannable dest type %s with >1 columns (%d) in result", base.Kind(), len(columns)) + } + + if scannable { + return r.Scan(dest) + } + + m := r.Mapper + + fields := m.TraversalsByName(v.Type(), columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !r.unsafe { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values := make([]interface{}, len(columns)) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + // scan into the struct field pointers and append to our results + return r.Scan(values...) +} + +// StructScan a single Row into dest. +func (r *Row) StructScan(dest interface{}) error { + return r.scanAny(dest, true) +} + +// SliceScan a row, returning a []interface{} with values similar to MapScan. +// This function is primarily intended for use where the number of columns +// is not known. Because you can pass an []interface{} directly to Scan, +// it's recommended that you do that as it will not have to allocate new +// slices per row. +func SliceScan(r ColScanner) ([]interface{}, error) { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return []interface{}{}, err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + + if err != nil { + return values, err + } + + for i := range columns { + values[i] = *(values[i].(*interface{})) + } + + return values, r.Err() +} + +// MapScan scans a single Row into the dest map[string]interface{}. +// Use this to get results for SQL that might not be under your control +// (for instance, if you're building an interface for an SQL server that +// executes SQL from input). Please do not use this as a primary interface! +// This will modify the map sent to it in place, so reuse the same map with +// care. 
Columns which occur more than once in the result will overwrite +// each other! +func MapScan(r ColScanner, dest map[string]interface{}) error { + // ignore r.started, since we needn't use reflect for anything. + columns, err := r.Columns() + if err != nil { + return err + } + + values := make([]interface{}, len(columns)) + for i := range values { + values[i] = new(interface{}) + } + + err = r.Scan(values...) + if err != nil { + return err + } + + for i, column := range columns { + dest[column] = *(values[i].(*interface{})) + } + + return r.Err() +} + +type rowsi interface { + Close() error + Columns() ([]string, error) + Err() error + Next() bool + Scan(...interface{}) error +} + +// structOnlyError returns an error appropriate for type when a non-scannable +// struct is expected but something else is given +func structOnlyError(t reflect.Type) error { + isStruct := t.Kind() == reflect.Struct + isScanner := reflect.PtrTo(t).Implements(_scannerInterface) + if !isStruct { + return fmt.Errorf("expected %s but got %s", reflect.Struct, t.Kind()) + } + if isScanner { + return fmt.Errorf("structscan expects a struct dest but the provided struct type %s implements scanner", t.Name()) + } + return fmt.Errorf("expected a struct, but struct %s has no exported fields", t.Name()) +} + +// scanAll scans all rows into a destination, which must be a slice of any +// type. If the destination slice type is a Struct, then StructScan will be +// used on each row. If the destination is some other kind of base type, then +// each row must only have one column which can scan into that type. This +// allows you to do something like: +// +// rows, _ := db.Query("select id from people;") +// var ids []int +// scanAll(rows, &ids, false) +// +// and ids will be a list of the id results. I realize that this is a desirable +// interface to expose to users, but for now it will only be exposed via changes +// to `Get` and `Select`. The reason that this has been implemented like this is +// this is the only way to not duplicate reflect work in the new API while +// maintaining backwards compatibility. 
+func scanAll(rows rowsi, dest interface{}, structOnly bool) error { + var v, vp reflect.Value + + value := reflect.ValueOf(dest) + + // json.Unmarshal returns errors for these + if value.Kind() != reflect.Ptr { + return errors.New("must pass a pointer, not a value, to StructScan destination") + } + if value.IsNil() { + return errors.New("nil pointer passed to StructScan destination") + } + direct := reflect.Indirect(value) + + slice, err := baseType(value.Type(), reflect.Slice) + if err != nil { + return err + } + + isPtr := slice.Elem().Kind() == reflect.Ptr + base := reflectx.Deref(slice.Elem()) + scannable := isScannable(base) + + if structOnly && scannable { + return structOnlyError(base) + } + + columns, err := rows.Columns() + if err != nil { + return err + } + + // if it's a base type make sure it only has 1 column; if not return an error + if scannable && len(columns) > 1 { + return fmt.Errorf("non-struct dest type %s with >1 columns (%d)", base.Kind(), len(columns)) + } + + if !scannable { + var values []interface{} + var m *reflectx.Mapper + + switch rows.(type) { + case *Rows: + m = rows.(*Rows).Mapper + default: + m = mapper() + } + + fields := m.TraversalsByName(base, columns) + // if we are not unsafe and are missing fields, return an error + if f, err := missingFields(fields); err != nil && !isUnsafe(rows) { + return fmt.Errorf("missing destination name %s in %T", columns[f], dest) + } + values = make([]interface{}, len(columns)) + + for rows.Next() { + // create a new struct type (which returns PtrTo) and indirect it + vp = reflect.New(base) + v = reflect.Indirect(vp) + + err = fieldsByTraversal(v, fields, values, true) + if err != nil { + return err + } + + // scan into the struct field pointers and append to our results + err = rows.Scan(values...) + if err != nil { + return err + } + + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, v)) + } + } + } else { + for rows.Next() { + vp = reflect.New(base) + err = rows.Scan(vp.Interface()) + if err != nil { + return err + } + // append + if isPtr { + direct.Set(reflect.Append(direct, vp)) + } else { + direct.Set(reflect.Append(direct, reflect.Indirect(vp))) + } + } + } + + return rows.Err() +} + +// FIXME: StructScan was the very first bit of API in sqlx, and now unfortunately +// it doesn't really feel like it's named properly. There is an incongruency +// between this and the way that StructScan (which might better be ScanStruct +// anyway) works on a rows object. + +// StructScan all rows from an sql.Rows or an sqlx.Rows into the dest slice. +// StructScan will scan in the entire rows result, so if you do not want to +// allocate structs for the entire result, use Queryx and see sqlx.Rows.StructScan. +// If rows is sqlx.Rows, it will use its mapper, otherwise it will use the default. +func StructScan(rows rowsi, dest interface{}) error { + return scanAll(rows, dest, true) + +} + +// reflect helpers + +func baseType(t reflect.Type, expected reflect.Kind) (reflect.Type, error) { + t = reflectx.Deref(t) + if t.Kind() != expected { + return nil, fmt.Errorf("expected %s but got %s", expected, t.Kind()) + } + return t, nil +} + +// fieldsByName fills a values interface with fields from the passed value based +// on the traversals in int. If ptrs is true, return addresses instead of values. +// We write this instead of using FieldsByName to save allocations and map lookups +// when iterating over many rows. Empty traversals will get an interface pointer. 
+// Because of the necessity of requesting ptrs or values, it's considered a bit too +// specialized for inclusion in reflectx itself. +func fieldsByTraversal(v reflect.Value, traversals [][]int, values []interface{}, ptrs bool) error { + v = reflect.Indirect(v) + if v.Kind() != reflect.Struct { + return errors.New("argument not a struct") + } + + for i, traversal := range traversals { + if len(traversal) == 0 { + values[i] = new(interface{}) + continue + } + f := reflectx.FieldByIndexes(v, traversal) + if ptrs { + values[i] = f.Addr().Interface() + } else { + values[i] = f.Interface() + } + } + return nil +} + +func missingFields(transversals [][]int) (field int, err error) { + for i, t := range transversals { + if len(t) == 0 { + return i, errors.New("missing field") + } + } + return 0, nil +} diff --git a/vendor/github.com/jmoiron/sqlx/sqlx_context.go b/vendor/github.com/jmoiron/sqlx/sqlx_context.go new file mode 100644 index 0000000000..7aa4dd01e1 --- /dev/null +++ b/vendor/github.com/jmoiron/sqlx/sqlx_context.go @@ -0,0 +1,414 @@ +// +build go1.8 + +package sqlx + +import ( + "context" + "database/sql" + "fmt" + "io/ioutil" + "path/filepath" + "reflect" +) + +// ConnectContext to a database and verify with a ping. +func ConnectContext(ctx context.Context, driverName, dataSourceName string) (*DB, error) { + db, err := Open(driverName, dataSourceName) + if err != nil { + return db, err + } + err = db.PingContext(ctx) + return db, err +} + +// QueryerContext is an interface used by GetContext and SelectContext +type QueryerContext interface { + QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) + QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) + QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row +} + +// PreparerContext is an interface used by PreparexContext. +type PreparerContext interface { + PrepareContext(ctx context.Context, query string) (*sql.Stmt, error) +} + +// ExecerContext is an interface used by MustExecContext and LoadFileContext +type ExecerContext interface { + ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) +} + +// ExtContext is a union interface which can bind, query, and exec, with Context +// used by NamedQueryContext and NamedExecContext. +type ExtContext interface { + binder + QueryerContext + ExecerContext +} + +// SelectContext executes a query using the provided Queryer, and StructScans +// each row into dest, which must be a slice. If the slice elements are +// scannable, then the result set must have only one column. Otherwise, +// StructScan is used. The *sql.Rows are closed automatically. +// Any placeholder parameters are replaced with supplied args. +func SelectContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + rows, err := q.QueryxContext(ctx, query, args...) + if err != nil { + return err + } + // if something happens here, we want to make sure the rows are Closed + defer rows.Close() + return scanAll(rows, dest, false) +} + +// PreparexContext prepares a statement. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. 
+func PreparexContext(ctx context.Context, p PreparerContext, query string) (*Stmt, error) { + s, err := p.PrepareContext(ctx, query) + if err != nil { + return nil, err + } + return &Stmt{Stmt: s, unsafe: isUnsafe(p), Mapper: mapperFor(p)}, err +} + +// GetContext does a QueryRow using the provided Queryer, and scans the +// resulting row to dest. If dest is scannable, the result must only have one +// column. Otherwise, StructScan is used. Get will return sql.ErrNoRows like +// row.Scan would. Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func GetContext(ctx context.Context, q QueryerContext, dest interface{}, query string, args ...interface{}) error { + r := q.QueryRowxContext(ctx, query, args...) + return r.scanAny(dest, false) +} + +// LoadFileContext exec's every statement in a file (as a single call to Exec). +// LoadFileContext may return a nil *sql.Result if errors are encountered +// locating or reading the file at path. LoadFile reads the entire file into +// memory, so it is not suitable for loading large data dumps, but can be useful +// for initializing schemas or loading indexes. +// +// FIXME: this does not really work with multi-statement files for mattn/go-sqlite3 +// or the go-mysql-driver/mysql drivers; pq seems to be an exception here. Detecting +// this by requiring something with DriverName() and then attempting to split the +// queries will be difficult to get right, and its current driver-specific behavior +// is deemed at least not complex in its incorrectness. +func LoadFileContext(ctx context.Context, e ExecerContext, path string) (*sql.Result, error) { + realpath, err := filepath.Abs(path) + if err != nil { + return nil, err + } + contents, err := ioutil.ReadFile(realpath) + if err != nil { + return nil, err + } + res, err := e.ExecContext(ctx, string(contents)) + return &res, err +} + +// MustExecContext execs the query using e and panics if there was an error. +// Any placeholder parameters are replaced with supplied args. +func MustExecContext(ctx context.Context, e ExecerContext, query string, args ...interface{}) sql.Result { + res, err := e.ExecContext(ctx, query, args...) + if err != nil { + panic(err) + } + return res +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (db *DB) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, db, query) +} + +// NamedQueryContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedQueryContext(ctx context.Context, query string, arg interface{}) (*Rows, error) { + return NamedQueryContext(ctx, db, query, arg) +} + +// NamedExecContext using this DB. +// Any named placeholder parameters are replaced with fields from arg. +func (db *DB) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, db, query, arg) +} + +// SelectContext using this DB. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, db, dest, query, args...) +} + +// GetContext using this DB. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. 
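A brief sketch of GetContext under a deadline, matching the semantics described above; the query, the `people` table, and the helper name are hypothetical:

```go
package example

import (
	"context"
	"database/sql"
	"time"

	"github.com/jmoiron/sqlx"
)

// lookupName is a hypothetical helper; the query is illustrative.
func lookupName(db *sqlx.DB, id int) (string, error) {
	// The context bounds the whole query, not just connection setup.
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	var name string
	err := db.GetContext(ctx, &name, "SELECT name FROM people WHERE id = $1", id)
	if err == sql.ErrNoRows {
		// An empty result set surfaces as sql.ErrNoRows, mirroring row.Scan.
		return "", err
	}
	return name, err
}
```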
+func (db *DB) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, db, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (db *DB) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, db, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := db.DB.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := db.DB.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: db.unsafe, Mapper: db.Mapper} +} + +// MustBeginTx starts a transaction, and panics on error. Returns an *sqlx.Tx instead +// of an *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// MustBeginContext is canceled. +func (db *DB) MustBeginTx(ctx context.Context, opts *sql.TxOptions) *Tx { + tx, err := db.BeginTxx(ctx, opts) + if err != nil { + panic(err) + } + return tx +} + +// MustExecContext (panic) runs MustExec using this database. +// Any placeholder parameters are replaced with supplied args. +func (db *DB) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, db, query, args...) +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. +func (db *DB) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := db.DB.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, err +} + +// Connx returns an *sqlx.Conn instead of an *sql.Conn. +func (db *DB) Connx(ctx context.Context) (*Conn, error) { + conn, err := db.DB.Conn(ctx) + if err != nil { + return nil, err + } + + return &Conn{Conn: conn, driverName: db.driverName, unsafe: db.unsafe, Mapper: db.Mapper}, nil +} + +// BeginTxx begins a transaction and returns an *sqlx.Tx instead of an +// *sql.Tx. +// +// The provided context is used until the transaction is committed or rolled +// back. If the context is canceled, the sql package will roll back the +// transaction. Tx.Commit will return an error if the context provided to +// BeginxContext is canceled. 
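The transaction/context contract documented above, in a compact sketch; the `accounts` table, the amounts, and the helper name are hypothetical:

```go
package example

import (
	"context"

	"github.com/jmoiron/sqlx"
)

// transfer shows BeginTxx with a context. If ctx is canceled before
// Commit, database/sql rolls the transaction back and Commit errors.
func transfer(ctx context.Context, db *sqlx.DB, from, to int) error {
	tx, err := db.BeginTxx(ctx, nil)
	if err != nil {
		return err
	}
	// Rollback after a successful Commit returns sql.ErrTxDone, which is
	// safe to ignore here.
	defer tx.Rollback()

	if _, err := tx.ExecContext(ctx,
		"UPDATE accounts SET balance = balance - 10 WHERE id = $1", from); err != nil {
		return err
	}
	if _, err := tx.ExecContext(ctx,
		"UPDATE accounts SET balance = balance + 10 WHERE id = $1", to); err != nil {
		return err
	}
	return tx.Commit()
}
```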
+func (c *Conn) BeginTxx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) { + tx, err := c.Conn.BeginTx(ctx, opts) + if err != nil { + return nil, err + } + return &Tx{Tx: tx, driverName: c.driverName, unsafe: c.unsafe, Mapper: c.Mapper}, err +} + +// SelectContext using this Conn. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, c, dest, query, args...) +} + +// GetContext using this Conn. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (c *Conn) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, c, dest, query, args...) +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (c *Conn) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, c, query) +} + +// QueryxContext queries the database and returns an *sqlx.Rows. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := c.Conn.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: c.unsafe, Mapper: c.Mapper}, err +} + +// QueryRowxContext queries the database and returns an *sqlx.Row. +// Any placeholder parameters are replaced with supplied args. +func (c *Conn) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := c.Conn.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: c.unsafe, Mapper: c.Mapper} +} + +// Rebind a query within a Conn's bindvar type. +func (c *Conn) Rebind(query string) string { + return Rebind(BindType(c.driverName), query) +} + +// StmtxContext returns a version of the prepared statement which runs within a +// transaction. Provided stmt can be either *sql.Stmt or *sqlx.Stmt. +func (tx *Tx) StmtxContext(ctx context.Context, stmt interface{}) *Stmt { + var s *sql.Stmt + switch v := stmt.(type) { + case Stmt: + s = v.Stmt + case *Stmt: + s = v.Stmt + case *sql.Stmt: + s = v + default: + panic(fmt.Sprintf("non-statement type %v passed to Stmtx", reflect.ValueOf(stmt).Type())) + } + return &Stmt{Stmt: tx.StmtContext(ctx, s), Mapper: tx.Mapper} +} + +// NamedStmtContext returns a version of the prepared statement which runs +// within a transaction. +func (tx *Tx) NamedStmtContext(ctx context.Context, stmt *NamedStmt) *NamedStmt { + return &NamedStmt{ + QueryString: stmt.QueryString, + Params: stmt.Params, + Stmt: tx.StmtxContext(ctx, stmt.Stmt), + } +} + +// PreparexContext returns an sqlx.Stmt instead of a sql.Stmt. +// +// The provided context is used for the preparation of the statement, not for +// the execution of the statement. +func (tx *Tx) PreparexContext(ctx context.Context, query string) (*Stmt, error) { + return PreparexContext(ctx, tx, query) +} + +// PrepareNamedContext returns an sqlx.NamedStmt +func (tx *Tx) PrepareNamedContext(ctx context.Context, query string) (*NamedStmt, error) { + return prepareNamedContext(ctx, tx, query) +} + +// MustExecContext runs MustExecContext within a transaction. +// Any placeholder parameters are replaced with supplied args. 
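To round out the named-parameter helpers seen above (PrepareNamedContext, NamedExecContext), a small sketch; the `Person` type, its tags, and the `people` table are hypothetical:

```go
package example

import (
	"context"

	"github.com/jmoiron/sqlx"
)

// Person is a hypothetical type; db tags bind fields to :named parameters.
type Person struct {
	ID   int    `db:"id"`
	Name string `db:"name"`
}

// insertPerson shows NamedExecContext: :id and :name are filled from p.
func insertPerson(ctx context.Context, db *sqlx.DB, p Person) error {
	_, err := db.NamedExecContext(ctx,
		"INSERT INTO people (id, name) VALUES (:id, :name)", p)
	return err
}
```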
+func (tx *Tx) MustExecContext(ctx context.Context, query string, args ...interface{}) sql.Result { + return MustExecContext(ctx, tx, query, args...) +} + +// QueryxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := tx.Tx.QueryContext(ctx, query, args...) + if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: tx.unsafe, Mapper: tx.Mapper}, err +} + +// SelectContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) SelectContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return SelectContext(ctx, tx, dest, query, args...) +} + +// GetContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (tx *Tx) GetContext(ctx context.Context, dest interface{}, query string, args ...interface{}) error { + return GetContext(ctx, tx, dest, query, args...) +} + +// QueryRowxContext within a transaction and context. +// Any placeholder parameters are replaced with supplied args. +func (tx *Tx) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := tx.Tx.QueryContext(ctx, query, args...) + return &Row{rows: rows, err: err, unsafe: tx.unsafe, Mapper: tx.Mapper} +} + +// NamedExecContext using this Tx. +// Any named placeholder parameters are replaced with fields from arg. +func (tx *Tx) NamedExecContext(ctx context.Context, query string, arg interface{}) (sql.Result, error) { + return NamedExecContext(ctx, tx, query, arg) +} + +// SelectContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) SelectContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return SelectContext(ctx, &qStmt{s}, dest, "", args...) +} + +// GetContext using the prepared statement. +// Any placeholder parameters are replaced with supplied args. +// An error is returned if the result set is empty. +func (s *Stmt) GetContext(ctx context.Context, dest interface{}, args ...interface{}) error { + return GetContext(ctx, &qStmt{s}, dest, "", args...) +} + +// MustExecContext (panic) using this statement. Note that the query portion of +// the error output will be blank, as Stmt does not expose its query. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) MustExecContext(ctx context.Context, args ...interface{}) sql.Result { + return MustExecContext(ctx, &qStmt{s}, "", args...) +} + +// QueryRowxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryRowxContext(ctx context.Context, args ...interface{}) *Row { + qs := &qStmt{s} + return qs.QueryRowxContext(ctx, "", args...) +} + +// QueryxContext using this statement. +// Any placeholder parameters are replaced with supplied args. +func (s *Stmt) QueryxContext(ctx context.Context, args ...interface{}) (*Rows, error) { + qs := &qStmt{s} + return qs.QueryxContext(ctx, "", args...) +} + +func (q *qStmt) QueryContext(ctx context.Context, query string, args ...interface{}) (*sql.Rows, error) { + return q.Stmt.QueryContext(ctx, args...) +} + +func (q *qStmt) QueryxContext(ctx context.Context, query string, args ...interface{}) (*Rows, error) { + r, err := q.Stmt.QueryContext(ctx, args...) 
+ if err != nil { + return nil, err + } + return &Rows{Rows: r, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper}, err +} + +func (q *qStmt) QueryRowxContext(ctx context.Context, query string, args ...interface{}) *Row { + rows, err := q.Stmt.QueryContext(ctx, args...) + return &Row{rows: rows, err: err, unsafe: q.Stmt.unsafe, Mapper: q.Stmt.Mapper} +} + +func (q *qStmt) ExecContext(ctx context.Context, query string, args ...interface{}) (sql.Result, error) { + return q.Stmt.ExecContext(ctx, args...) +} diff --git a/vendor/github.com/lann/builder/.gitignore b/vendor/github.com/lann/builder/.gitignore new file mode 100644 index 0000000000..f54eb28f6e --- /dev/null +++ b/vendor/github.com/lann/builder/.gitignore @@ -0,0 +1,2 @@ +*~ +\#*# diff --git a/vendor/github.com/lann/builder/.travis.yml b/vendor/github.com/lann/builder/.travis.yml new file mode 100644 index 0000000000..c8860f69bc --- /dev/null +++ b/vendor/github.com/lann/builder/.travis.yml @@ -0,0 +1,7 @@ +language: go + +go: + - '1.8' + - '1.9' + - '1.10' + - tip diff --git a/vendor/github.com/lann/builder/LICENSE b/vendor/github.com/lann/builder/LICENSE new file mode 100644 index 0000000000..a109e8051c --- /dev/null +++ b/vendor/github.com/lann/builder/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2014-2015 Lann Martin + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lann/builder/README.md b/vendor/github.com/lann/builder/README.md new file mode 100644 index 0000000000..3b18550bb9 --- /dev/null +++ b/vendor/github.com/lann/builder/README.md @@ -0,0 +1,68 @@ +# Builder - fluent immutable builders for Go + +[![GoDoc](https://godoc.org/github.com/lann/builder?status.png)](https://godoc.org/github.com/lann/builder) +[![Build Status](https://travis-ci.org/lann/builder.png?branch=master)](https://travis-ci.org/lann/builder) + +Builder was originally written for +[Squirrel](https://github.com/lann/squirrel), a fluent SQL generator. It +is probably the best example of Builder in action. + +Builder helps you write **fluent** DSLs for your libraries with method chaining: + +```go +resp := ReqBuilder. + Url("http://golang.org"). + Header("User-Agent", "Builder"). 
+ Get() +``` + +Builder uses **immutable** persistent data structures +([these](https://github.com/mndrix/ps), specifically) +so that each step in your method chain can be reused: + +```go +build := WordBuilder.AddLetters("Build") +builder := build.AddLetters("er") +building := build.AddLetters("ing") +``` + +Builder makes it easy to **build** structs using the **builder** pattern +(*surprise!*): + +```go +import "github.com/lann/builder" + +type Muppet struct { + Name string + Friends []string +} + +type muppetBuilder builder.Builder + +func (b muppetBuilder) Name(name string) muppetBuilder { + return builder.Set(b, "Name", name).(muppetBuilder) +} + +func (b muppetBuilder) AddFriend(friend string) muppetBuilder { + return builder.Append(b, "Friends", friend).(muppetBuilder) +} + +func (b muppetBuilder) Build() Muppet { + return builder.GetStruct(b).(Muppet) +} + +var MuppetBuilder = builder.Register(muppetBuilder{}, Muppet{}).(muppetBuilder) +``` +```go +MuppetBuilder. + Name("Beaker"). + AddFriend("Dr. Honeydew"). + Build() + +=> Muppet{Name:"Beaker", Friends:[]string{"Dr. Honeydew"}} +``` + +## License + +Builder is released under the +[MIT License](http://www.opensource.org/licenses/MIT). diff --git a/vendor/github.com/lann/builder/builder.go b/vendor/github.com/lann/builder/builder.go new file mode 100644 index 0000000000..ff621406a4 --- /dev/null +++ b/vendor/github.com/lann/builder/builder.go @@ -0,0 +1,225 @@ +// Package builder provides a method for writing fluent immutable builders. +package builder + +import ( + "github.com/lann/ps" + "go/ast" + "reflect" +) + +// Builder stores a set of named values. +// +// New types can be declared with underlying type Builder and used with the +// functions in this package. See example. +// +// Instances of Builder should be treated as immutable. It is up to the +// implementor to ensure mutable values set on a Builder are not mutated while +// the Builder is in use. +type Builder struct { + builderMap ps.Map +} + +var ( + EmptyBuilder = Builder{ps.NewMap()} + emptyBuilderValue = reflect.ValueOf(EmptyBuilder) +) + +func getBuilderMap(builder interface{}) ps.Map { + b := convert(builder, Builder{}).(Builder) + + if b.builderMap == nil { + return ps.NewMap() + } + + return b.builderMap +} + +// Set returns a copy of the given builder with a new value set for the given +// name. +// +// Set (and all other functions taking a builder in this package) will panic if +// the given builder's underlying type is not Builder. +func Set(builder interface{}, name string, v interface{}) interface{} { + b := Builder{getBuilderMap(builder).Set(name, v)} + return convert(b, builder) +} + +// Delete returns a copy of the given builder with the given named value unset. +func Delete(builder interface{}, name string) interface{} { + b := Builder{getBuilderMap(builder).Delete(name)} + return convert(b, builder) +} + +// Append returns a copy of the given builder with new value(s) appended to the +// named list. If the value was previously unset or set with Set (even to a e.g. +// slice values), the new value(s) will be appended to an empty list. +func Append(builder interface{}, name string, vs ...interface{}) interface{} { + return Extend(builder, name, vs) +} + +// Extend behaves like Append, except it takes a single slice or array value +// which will be concatenated to the named list. 
+// +// Unlike a variadic call to Append - which requires a []interface{} value - +// Extend accepts slices or arrays of any type. +// +// Extend will panic if the given value is not a slice, array, or nil. +func Extend(builder interface{}, name string, vs interface{}) interface{} { + if vs == nil { + return builder + } + + maybeList, ok := getBuilderMap(builder).Lookup(name) + + var list ps.List + if ok { + list, ok = maybeList.(ps.List) + } + if !ok { + list = ps.NewList() + } + + forEach(vs, func(v interface{}) { + list = list.Cons(v) + }) + + return Set(builder, name, list) +} + +func listToSlice(list ps.List, arrayType reflect.Type) reflect.Value { + size := list.Size() + slice := reflect.MakeSlice(arrayType, size, size) + for i := size - 1; i >= 0; i-- { + val := reflect.ValueOf(list.Head()) + slice.Index(i).Set(val) + list = list.Tail() + } + return slice +} + +var anyArrayType = reflect.TypeOf([]interface{}{}) + +// Get retrieves a single named value from the given builder. +// If the value has not been set, it returns (nil, false). Otherwise, it will +// return (value, true). +// +// If the named value was last set with Append or Extend, the returned value +// will be a slice. If the given Builder has been registered with Register or +// RegisterType and the given name is an exported field of the registered +// struct, the returned slice will have the same type as that field. Otherwise +// the slice will have type []interface{}. It will panic if the given name is a +// registered struct's exported field and the value set on the Builder is not +// assignable to the field. +func Get(builder interface{}, name string) (interface{}, bool) { + val, ok := getBuilderMap(builder).Lookup(name) + if !ok { + return nil, false + } + + list, isList := val.(ps.List) + if isList { + arrayType := anyArrayType + + if ast.IsExported(name) { + structType := getBuilderStructType(reflect.TypeOf(builder)) + if structType != nil { + field, ok := (*structType).FieldByName(name) + if ok { + arrayType = field.Type + } + } + } + + val = listToSlice(list, arrayType).Interface() + } + + return val, true +} + +// GetMap returns a map[string]interface{} of the values set in the given +// builder. +// +// See notes on Get regarding returned slices. +func GetMap(builder interface{}) map[string]interface{} { + m := getBuilderMap(builder) + structType := getBuilderStructType(reflect.TypeOf(builder)) + + ret := make(map[string]interface{}, m.Size()) + + m.ForEach(func(name string, val ps.Any) { + list, isList := val.(ps.List) + if isList { + arrayType := anyArrayType + + if structType != nil { + field, ok := (*structType).FieldByName(name) + if ok { + arrayType = field.Type + } + } + + val = listToSlice(list, arrayType).Interface() + } + + ret[name] = val + }) + + return ret +} + +// GetStruct builds a new struct from the given registered builder. +// It will return nil if the given builder's type has not been registered with +// Register or RegisterValue. +// +// All values set on the builder with names that start with an uppercase letter +// (i.e. which would be exported if they were identifiers) are assigned to the +// corresponding exported fields of the struct. +// +// GetStruct will panic if any of these "exported" values are not assignable to +// their corresponding struct fields. 
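A short sketch of the Append/Extend/Get semantics documented above, on an unregistered Builder; the field name "Nums" is hypothetical:

```go
package main

import (
	"fmt"

	"github.com/lann/builder"
)

func main() {
	// Append is variadic; Extend concatenates one slice of any element type.
	b := builder.Append(builder.EmptyBuilder, "Nums", 1, 2)
	b = builder.Extend(b, "Nums", []int{3, 4})

	// The Builder type is unregistered here, so Get returns []interface{}.
	vals, ok := builder.Get(b, "Nums")
	fmt.Println(vals, ok) // [1 2 3 4] true
}
```

Because each step returns a fresh persistent map, the intermediate value of `b` after Append could be kept and reused independently of the extended one.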
+func GetStruct(builder interface{}) interface{} { + structVal := newBuilderStruct(reflect.TypeOf(builder)) + if structVal == nil { + return nil + } + return scanStruct(builder, structVal) +} + +// GetStructLike builds a new struct from the given builder with the same type +// as the given struct. +// +// All values set on the builder with names that start with an uppercase letter +// (i.e. which would be exported if they were identifiers) are assigned to the +// corresponding exported fields of the struct. +// +// ScanStruct will panic if any of these "exported" values are not assignable to +// their corresponding struct fields. +func GetStructLike(builder interface{}, strct interface{}) interface{} { + structVal := reflect.New(reflect.TypeOf(strct)).Elem() + return scanStruct(builder, &structVal) +} + +func scanStruct(builder interface{}, structVal *reflect.Value) interface{} { + getBuilderMap(builder).ForEach(func(name string, val ps.Any) { + if ast.IsExported(name) { + field := structVal.FieldByName(name) + + var value reflect.Value + switch v := val.(type) { + case nil: + switch field.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + value = reflect.Zero(field.Type()) + } + // nil is not valid for this Type; Set will panic + case ps.List: + value = listToSlice(v, field.Type()) + default: + value = reflect.ValueOf(val) + } + field.Set(value) + } + }) + + return structVal.Interface() +} diff --git a/vendor/github.com/lann/builder/reflect.go b/vendor/github.com/lann/builder/reflect.go new file mode 100644 index 0000000000..3b236e7918 --- /dev/null +++ b/vendor/github.com/lann/builder/reflect.go @@ -0,0 +1,24 @@ +package builder + +import "reflect" + +func convert(from interface{}, to interface{}) interface{} { + return reflect. + ValueOf(from). + Convert(reflect.TypeOf(to)). + Interface() +} + +func forEach(s interface{}, f func(interface{})) { + val := reflect.ValueOf(s) + + kind := val.Kind() + if kind != reflect.Slice && kind != reflect.Array { + panic(&reflect.ValueError{Method: "builder.forEach", Kind: kind}) + } + + l := val.Len() + for i := 0; i < l; i++ { + f(val.Index(i).Interface()) + } +} diff --git a/vendor/github.com/lann/builder/registry.go b/vendor/github.com/lann/builder/registry.go new file mode 100644 index 0000000000..612845418e --- /dev/null +++ b/vendor/github.com/lann/builder/registry.go @@ -0,0 +1,59 @@ +package builder + +import ( + "reflect" + "sync" +) + +var ( + registry = make(map[reflect.Type]reflect.Type) + registryMux sync.RWMutex +) + +// RegisterType maps the given builderType to a structType. +// This mapping affects the type of slices returned by Get and is required for +// GetStruct to work. +// +// Returns a Value containing an empty instance of the registered builderType. +// +// RegisterType will panic if builderType's underlying type is not Builder or +// if structType's Kind is not Struct. +func RegisterType(builderType reflect.Type, structType reflect.Type) *reflect.Value { + registryMux.Lock() + defer registryMux.Unlock() + structType.NumField() // Panic if structType is not a struct + registry[builderType] = structType + emptyValue := emptyBuilderValue.Convert(builderType) + return &emptyValue +} + +// Register wraps RegisterType, taking instances instead of Types. +// +// Returns an empty instance of the registered builder type which can be used +// as the initial value for builder expressions. See example. 
+func Register(builderProto, structProto interface{}) interface{} { + empty := RegisterType( + reflect.TypeOf(builderProto), + reflect.TypeOf(structProto), + ).Interface() + return empty +} + +func getBuilderStructType(builderType reflect.Type) *reflect.Type { + registryMux.RLock() + defer registryMux.RUnlock() + structType, ok := registry[builderType] + if !ok { + return nil + } + return &structType +} + +func newBuilderStruct(builderType reflect.Type) *reflect.Value { + structType := getBuilderStructType(builderType) + if structType == nil { + return nil + } + newStruct := reflect.New(*structType).Elem() + return &newStruct +} diff --git a/vendor/github.com/lann/ps/LICENSE b/vendor/github.com/lann/ps/LICENSE new file mode 100644 index 0000000000..69f9ae8d5a --- /dev/null +++ b/vendor/github.com/lann/ps/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2013 Michael Hendricks + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lann/ps/README.md b/vendor/github.com/lann/ps/README.md new file mode 100644 index 0000000000..325a5b3540 --- /dev/null +++ b/vendor/github.com/lann/ps/README.md @@ -0,0 +1,10 @@ +**This is a stable fork of https://github.com/mndrix/ps; it will not introduce breaking changes.** + +ps +== + +Persistent data structures for Go. See the [full package documentation](http://godoc.org/github.com/lann/ps) + +Install with + + go get github.com/lann/ps diff --git a/vendor/github.com/lann/ps/list.go b/vendor/github.com/lann/ps/list.go new file mode 100644 index 0000000000..48a1cebacf --- /dev/null +++ b/vendor/github.com/lann/ps/list.go @@ -0,0 +1,93 @@ +package ps + +// List is a persistent list of possibly heterogenous values. +type List interface { + // IsNil returns true if the list is empty + IsNil() bool + + // Cons returns a new list with val as the head + Cons(val Any) List + + // Head returns the first element of the list; + // panics if the list is empty + Head() Any + + // Tail returns a list with all elements except the head; + // panics if the list is empty + Tail() List + + // Size returns the list's length. This takes O(1) time. + Size() int + + // ForEach executes a callback for each value in the list. + ForEach(f func(Any)) + + // Reverse returns a list whose elements are in the opposite order as + // the original list. + Reverse() List +} + +// Immutable (i.e. 
persistent) list +type list struct { + depth int // the number of nodes after, and including, this one + value Any + tail *list +} + +// An empty list shared by all lists +var nilList = &list{} + +// NewList returns a new, empty list. The result is a singly linked +// list implementation. All lists share an empty tail, so allocating +// empty lists is efficient in time and memory. +func NewList() List { + return nilList +} + +func (self *list) IsNil() bool { + return self == nilList +} + +func (self *list) Size() int { + return self.depth +} + +func (tail *list) Cons(val Any) List { + var xs list + xs.depth = tail.depth + 1 + xs.value = val + xs.tail = tail + return &xs +} + +func (self *list) Head() Any { + if self.IsNil() { + panic("Called Head() on an empty list") + } + + return self.value +} + +func (self *list) Tail() List { + if self.IsNil() { + panic("Called Tail() on an empty list") + } + + return self.tail +} + +// ForEach executes a callback for each value in the list +func (self *list) ForEach(f func(Any)) { + if self.IsNil() { + return + } + f(self.Head()) + self.Tail().ForEach(f) +} + +// Reverse returns a list with elements in opposite order as this list +func (self *list) Reverse() List { + reversed := NewList() + self.ForEach(func(v Any) { reversed = reversed.Cons(v) }) + return reversed +} diff --git a/vendor/github.com/lann/ps/map.go b/vendor/github.com/lann/ps/map.go new file mode 100644 index 0000000000..192daec8f3 --- /dev/null +++ b/vendor/github.com/lann/ps/map.go @@ -0,0 +1,311 @@ +// Fully persistent data structures. A persistent data structure is a data +// structure that always preserves the previous version of itself when +// it is modified. Such data structures are effectively immutable, +// as their operations do not update the structure in-place, but instead +// always yield a new structure. +// +// Persistent +// data structures typically share structure among themselves. This allows +// operations to avoid copying the entire data structure. +package ps + +import ( + "bytes" + "fmt" +) + +// Any is a shorthand for Go's verbose interface{} type. +type Any interface{} + +// A Map associates unique keys (type string) with values (type Any). +type Map interface { + // IsNil returns true if the Map is empty + IsNil() bool + + // Set returns a new map in which key and value are associated. + // If the key didn't exist before, it's created; otherwise, the + // associated value is changed. + // This operation is O(log N) in the number of keys. + Set(key string, value Any) Map + + // Delete returns a new map with the association for key, if any, removed. + // This operation is O(log N) in the number of keys. + Delete(key string) Map + + // Lookup returns the value associated with a key, if any. If the key + // exists, the second return value is true; otherwise, false. + // This operation is O(log N) in the number of keys. + Lookup(key string) (Any, bool) + + // Size returns the number of key value pairs in the map. + // This takes O(1) time. + Size() int + + // ForEach executes a callback on each key value pair in the map. + ForEach(f func(key string, val Any)) + + // Keys returns a slice with all keys in this map. + // This operation is O(N) in the number of keys. + Keys() []string + + String() string +} + +// Immutable (i.e. 
persistent) associative array +const childCount = 8 +const shiftSize = 3 + +type tree struct { + count int + hash uint64 // hash of the key (used for tree balancing) + key string + value Any + children [childCount]*tree +} + +var nilMap = &tree{} + +// Recursively set nilMap's subtrees to point at itself. +// This eliminates all nil pointers in the map structure. +// All map nodes are created by cloning this structure so +// they avoid the problem too. +func init() { + for i := range nilMap.children { + nilMap.children[i] = nilMap + } +} + +// NewMap allocates a new, persistent map from strings to values of +// any type. +// This is currently implemented as a path-copying binary tree. +func NewMap() Map { + return nilMap +} + +func (self *tree) IsNil() bool { + return self == nilMap +} + +// clone returns an exact duplicate of a tree node +func (self *tree) clone() *tree { + var m tree + m = *self + return &m +} + +// constants for FNV-1a hash algorithm +const ( + offset64 uint64 = 14695981039346656037 + prime64 uint64 = 1099511628211 +) + +// hashKey returns a hash code for a given string +func hashKey(key string) uint64 { + hash := offset64 + for _, codepoint := range key { + hash ^= uint64(codepoint) + hash *= prime64 + } + return hash +} + +// Set returns a new map similar to this one but with key and value +// associated. If the key didn't exist, it's created; otherwise, the +// associated value is changed. +func (self *tree) Set(key string, value Any) Map { + hash := hashKey(key) + return setLowLevel(self, hash, hash, key, value) +} + +func setLowLevel(self *tree, partialHash, hash uint64, key string, value Any) *tree { + if self.IsNil() { // an empty tree is easy + m := self.clone() + m.count = 1 + m.hash = hash + m.key = key + m.value = value + return m + } + + if hash != self.hash { + m := self.clone() + i := partialHash % childCount + m.children[i] = setLowLevel(self.children[i], partialHash>>shiftSize, hash, key, value) + recalculateCount(m) + return m + } + + // replacing a key's previous value + m := self.clone() + m.value = value + return m +} + +// modifies a map by recalculating its key count based on the counts +// of its subtrees +func recalculateCount(m *tree) { + count := 0 + for _, t := range m.children { + count += t.Size() + } + m.count = count + 1 // add one to count ourself +} + +func (m *tree) Delete(key string) Map { + hash := hashKey(key) + newMap, _ := deleteLowLevel(m, hash, hash) + return newMap +} + +func deleteLowLevel(self *tree, partialHash, hash uint64) (*tree, bool) { + // empty trees are easy + if self.IsNil() { + return self, false + } + + if hash != self.hash { + i := partialHash % childCount + child, found := deleteLowLevel(self.children[i], partialHash>>shiftSize, hash) + if !found { + return self, false + } + newMap := self.clone() + newMap.children[i] = child + recalculateCount(newMap) + return newMap, true // ? 
this wasn't in the original code + } + + // we must delete our own node + if self.isLeaf() { // we have no children + return nilMap, true + } + /* + if self.subtreeCount() == 1 { // only one subtree + for _, t := range self.children { + if t != nilMap { + return t, true + } + } + panic("Tree with 1 subtree actually had no subtrees") + } + */ + + // find a node to replace us + i := -1 + size := -1 + for j, t := range self.children { + if t.Size() > size { + i = j + size = t.Size() + } + } + + // make chosen leaf smaller + replacement, child := self.children[i].deleteLeftmost() + newMap := replacement.clone() + for j := range self.children { + if j == i { + newMap.children[j] = child + } else { + newMap.children[j] = self.children[j] + } + } + recalculateCount(newMap) + return newMap, true +} + +// delete the leftmost node in a tree returning the node that +// was deleted and the tree left over after its deletion +func (m *tree) deleteLeftmost() (*tree, *tree) { + if m.isLeaf() { + return m, nilMap + } + + for i, t := range m.children { + if t != nilMap { + deleted, child := t.deleteLeftmost() + newMap := m.clone() + newMap.children[i] = child + recalculateCount(newMap) + return deleted, newMap + } + } + panic("Tree isn't a leaf but also had no children. How does that happen?") +} + +// isLeaf returns true if this is a leaf node +func (m *tree) isLeaf() bool { + return m.Size() == 1 +} + +// returns the number of child subtrees we have +func (m *tree) subtreeCount() int { + count := 0 + for _, t := range m.children { + if t != nilMap { + count++ + } + } + return count +} + +func (m *tree) Lookup(key string) (Any, bool) { + hash := hashKey(key) + return lookupLowLevel(m, hash, hash) +} + +func lookupLowLevel(self *tree, partialHash, hash uint64) (Any, bool) { + if self.IsNil() { // an empty tree is easy + return nil, false + } + + if hash != self.hash { + i := partialHash % childCount + return lookupLowLevel(self.children[i], partialHash>>shiftSize, hash) + } + + // we found it + return self.value, true +} + +func (m *tree) Size() int { + return m.count +} + +func (m *tree) ForEach(f func(key string, val Any)) { + if m.IsNil() { + return + } + + // ourself + f(m.key, m.value) + + // children + for _, t := range m.children { + if t != nilMap { + t.ForEach(f) + } + } +} + +func (m *tree) Keys() []string { + keys := make([]string, m.Size()) + i := 0 + m.ForEach(func(k string, v Any) { + keys[i] = k + i++ + }) + return keys +} + +// make it easier to display maps for debugging +func (m *tree) String() string { + keys := m.Keys() + buf := bytes.NewBufferString("{") + for _, key := range keys { + val, _ := m.Lookup(key) + fmt.Fprintf(buf, "%s: %s, ", key, val) + } + fmt.Fprintf(buf, "}\n") + return buf.String() +} diff --git a/vendor/github.com/lann/ps/profile.sh b/vendor/github.com/lann/ps/profile.sh new file mode 100644 index 0000000000..f03df05a5a --- /dev/null +++ b/vendor/github.com/lann/ps/profile.sh @@ -0,0 +1,3 @@ +#!/bin/sh +go test -c +./ps.test -test.run=none -test.bench=$2 -test.$1profile=$1.profile diff --git a/vendor/github.com/lib/pq/.gitignore b/vendor/github.com/lib/pq/.gitignore new file mode 100644 index 0000000000..3243952a4d --- /dev/null +++ b/vendor/github.com/lib/pq/.gitignore @@ -0,0 +1,6 @@ +.db +*.test +*~ +*.swp +.idea +.vscode \ No newline at end of file diff --git a/vendor/github.com/lib/pq/LICENSE.md b/vendor/github.com/lib/pq/LICENSE.md new file mode 100644 index 0000000000..5773904a30 --- /dev/null +++ 
b/vendor/github.com/lib/pq/LICENSE.md @@ -0,0 +1,8 @@ +Copyright (c) 2011-2013, 'pq' Contributors +Portions Copyright (C) 2011 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/lib/pq/README.md b/vendor/github.com/lib/pq/README.md new file mode 100644 index 0000000000..126ee5d35d --- /dev/null +++ b/vendor/github.com/lib/pq/README.md @@ -0,0 +1,36 @@ +# pq - A pure Go postgres driver for Go's database/sql package + +[![GoDoc](https://godoc.org/github.com/lib/pq?status.svg)](https://pkg.go.dev/github.com/lib/pq?tab=doc) + +## Install + + go get github.com/lib/pq + +## Features + +* SSL +* Handles bad connections for `database/sql` +* Scan `time.Time` correctly (i.e. `timestamp[tz]`, `time[tz]`, `date`) +* Scan binary blobs correctly (i.e. `bytea`) +* Package for `hstore` support +* COPY FROM support +* pq.ParseURL for converting urls to connection strings for sql.Open. +* Many libpq compatible environment variables +* Unix socket support +* Notifications: `LISTEN`/`NOTIFY` +* pgpass support +* GSS (Kerberos) auth + +## Tests + +`go test` is used for testing. See [TESTS.md](TESTS.md) for more details. + +## Status + +This package is currently in maintenance mode, which means: +1. It generally does not accept new features. +2. It does accept bug fixes and version compatability changes provided by the community. +3. Maintainers usually do not resolve reported issues. +4. Community members are encouraged to help each other with reported issues. + +For users that require new features or reliable resolution of reported bugs, we recommend using [pgx](https://github.com/jackc/pgx) which is under active development. diff --git a/vendor/github.com/lib/pq/TESTS.md b/vendor/github.com/lib/pq/TESTS.md new file mode 100644 index 0000000000..f05021115b --- /dev/null +++ b/vendor/github.com/lib/pq/TESTS.md @@ -0,0 +1,33 @@ +# Tests + +## Running Tests + +`go test` is used for testing. A running PostgreSQL +server is required, with the ability to log in. The +database to connect to test with is "pqgotest," on +"localhost" but these can be overridden using [environment +variables](https://www.postgresql.org/docs/9.3/static/libpq-envars.html). + +Example: + + PGHOST=/run/postgresql go test + +## Benchmarks + +A benchmark suite can be run as part of the tests: + + go test -bench . 
+ +## Example setup (Docker) + +Run a postgres container: + +``` +docker run --expose 5432:5432 postgres +``` + +Run tests: + +``` +PGHOST=localhost PGPORT=5432 PGUSER=postgres PGSSLMODE=disable PGDATABASE=postgres go test +``` diff --git a/vendor/github.com/lib/pq/array.go b/vendor/github.com/lib/pq/array.go new file mode 100644 index 0000000000..39c8f7e2e0 --- /dev/null +++ b/vendor/github.com/lib/pq/array.go @@ -0,0 +1,895 @@ +package pq + +import ( + "bytes" + "database/sql" + "database/sql/driver" + "encoding/hex" + "fmt" + "reflect" + "strconv" + "strings" +) + +var typeByteSlice = reflect.TypeOf([]byte{}) +var typeDriverValuer = reflect.TypeOf((*driver.Valuer)(nil)).Elem() +var typeSQLScanner = reflect.TypeOf((*sql.Scanner)(nil)).Elem() + +// Array returns the optimal driver.Valuer and sql.Scanner for an array or +// slice of any dimension. +// +// For example: +// db.Query(`SELECT * FROM t WHERE id = ANY($1)`, pq.Array([]int{235, 401})) +// +// var x []sql.NullInt64 +// db.QueryRow(`SELECT ARRAY[235, 401]`).Scan(pq.Array(&x)) +// +// Scanning multi-dimensional arrays is not supported. Arrays where the lower +// bound is not one (such as `[0:0]={1}') are not supported. +func Array(a interface{}) interface { + driver.Valuer + sql.Scanner +} { + switch a := a.(type) { + case []bool: + return (*BoolArray)(&a) + case []float64: + return (*Float64Array)(&a) + case []float32: + return (*Float32Array)(&a) + case []int64: + return (*Int64Array)(&a) + case []int32: + return (*Int32Array)(&a) + case []string: + return (*StringArray)(&a) + case [][]byte: + return (*ByteaArray)(&a) + + case *[]bool: + return (*BoolArray)(a) + case *[]float64: + return (*Float64Array)(a) + case *[]float32: + return (*Float32Array)(a) + case *[]int64: + return (*Int64Array)(a) + case *[]int32: + return (*Int32Array)(a) + case *[]string: + return (*StringArray)(a) + case *[][]byte: + return (*ByteaArray)(a) + } + + return GenericArray{a} +} + +// ArrayDelimiter may be optionally implemented by driver.Valuer or sql.Scanner +// to override the array delimiter used by GenericArray. +type ArrayDelimiter interface { + // ArrayDelimiter returns the delimiter character(s) for this element's type. + ArrayDelimiter() string +} + +// BoolArray represents a one-dimensional array of the PostgreSQL boolean type. +type BoolArray []bool + +// Scan implements the sql.Scanner interface. +func (a *BoolArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to BoolArray", src) +} + +func (a *BoolArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "BoolArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(BoolArray, len(elems)) + for i, v := range elems { + if len(v) != 1 { + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + switch v[0] { + case 't': + b[i] = true + case 'f': + b[i] = false + default: + return fmt.Errorf("pq: could not parse boolean array index %d: invalid boolean %q", i, v) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. 
+func (a BoolArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be exactly two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1+2*n) + + for i := 0; i < n; i++ { + b[2*i] = ',' + if a[i] { + b[1+2*i] = 't' + } else { + b[1+2*i] = 'f' + } + } + + b[0] = '{' + b[2*n] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// ByteaArray represents a one-dimensional array of the PostgreSQL bytea type. +type ByteaArray [][]byte + +// Scan implements the sql.Scanner interface. +func (a *ByteaArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to ByteaArray", src) +} + +func (a *ByteaArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "ByteaArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(ByteaArray, len(elems)) + for i, v := range elems { + b[i], err = parseBytea(v) + if err != nil { + return fmt.Errorf("could not parse bytea array index %d: %s", i, err.Error()) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. It uses the "hex" format which +// is only supported on PostgreSQL 9.0 or newer. +func (a ByteaArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // 3*N bytes of hex formatting, and N-1 bytes of delimiters. + size := 1 + 6*n + for _, x := range a { + size += hex.EncodedLen(len(x)) + } + + b := make([]byte, size) + + for i, s := 0, b; i < n; i++ { + o := copy(s, `,"\\x`) + o += hex.Encode(s[o:], a[i]) + s[o] = '"' + s = s[o+1:] + } + + b[0] = '{' + b[size-1] = '}' + + return string(b), nil + } + + return "{}", nil +} + +// Float64Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float64Array []float64 + +// Scan implements the sql.Scanner interface. +func (a *Float64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float64Array", src) +} + +func (a *Float64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseFloat(string(v), 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. 
+ b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, a[0], 'f', -1, 64) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, a[i], 'f', -1, 64) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// Float32Array represents a one-dimensional array of the PostgreSQL double +// precision type. +type Float32Array []float32 + +// Scan implements the sql.Scanner interface. +func (a *Float32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Float32Array", src) +} + +func (a *Float32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Float32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Float32Array, len(elems)) + for i, v := range elems { + var x float64 + if x, err = strconv.ParseFloat(string(v), 32); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = float32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Float32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendFloat(b, float64(a[0]), 'f', -1, 32) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendFloat(b, float64(a[i]), 'f', -1, 32) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// GenericArray implements the driver.Valuer and sql.Scanner interfaces for +// an array or slice of any dimension. +type GenericArray struct{ A interface{} } + +func (GenericArray) evaluateDestination(rt reflect.Type) (reflect.Type, func([]byte, reflect.Value) error, string) { + var assign func([]byte, reflect.Value) error + var del = "," + + // TODO calculate the assign function for other types + // TODO repeat this section on the element type of arrays or slices (multidimensional) + { + if reflect.PtrTo(rt).Implements(typeSQLScanner) { + // dest is always addressable because it is an element of a slice. + assign = func(src []byte, dest reflect.Value) (err error) { + ss := dest.Addr().Interface().(sql.Scanner) + if src == nil { + err = ss.Scan(nil) + } else { + err = ss.Scan(src) + } + return + } + goto FoundType + } + + assign = func([]byte, reflect.Value) error { + return fmt.Errorf("pq: scanning to %s is not implemented; only sql.Scanner", rt) + } + } + +FoundType: + + if ad, ok := reflect.Zero(rt).Interface().(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + return rt, assign, del +} + +// Scan implements the sql.Scanner interface. 
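No server is needed to see what these Value implementations emit; they build the backend's curly-brace text format directly. A small sketch (the output comments follow the code above):

```go
package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	b, _ := pq.BoolArray{true, false}.Value()
	fmt.Println(b) // {t,f}

	f, _ := pq.Float64Array{1.5, -2}.Value()
	fmt.Println(f) // {1.5,-2}

	// A nil slice encodes as SQL NULL rather than as an empty array.
	var n pq.Float32Array
	v, _ := n.Value()
	fmt.Println(v == nil) // true
}
```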
+func (a GenericArray) Scan(src interface{}) error { + dpv := reflect.ValueOf(a.A) + switch { + case dpv.Kind() != reflect.Ptr: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + case dpv.IsNil(): + return fmt.Errorf("pq: destination %T is nil", a.A) + } + + dv := dpv.Elem() + switch dv.Kind() { + case reflect.Slice: + case reflect.Array: + default: + return fmt.Errorf("pq: destination %T is not a pointer to array or slice", a.A) + } + + switch src := src.(type) { + case []byte: + return a.scanBytes(src, dv) + case string: + return a.scanBytes([]byte(src), dv) + case nil: + if dv.Kind() == reflect.Slice { + dv.Set(reflect.Zero(dv.Type())) + return nil + } + } + + return fmt.Errorf("pq: cannot convert %T to %s", src, dv.Type()) +} + +func (a GenericArray) scanBytes(src []byte, dv reflect.Value) error { + dtype, assign, del := a.evaluateDestination(dv.Type().Elem()) + dims, elems, err := parseArray(src, []byte(del)) + if err != nil { + return err + } + + // TODO allow multidimensional + + if len(dims) > 1 { + return fmt.Errorf("pq: scanning from multidimensional ARRAY%s is not implemented", + strings.Replace(fmt.Sprint(dims), " ", "][", -1)) + } + + // Treat a zero-dimensional array like an array with a single dimension of zero. + if len(dims) == 0 { + dims = append(dims, 0) + } + + for i, rt := 0, dv.Type(); i < len(dims); i, rt = i+1, rt.Elem() { + switch rt.Kind() { + case reflect.Slice: + case reflect.Array: + if rt.Len() != dims[i] { + return fmt.Errorf("pq: cannot convert ARRAY%s to %s", + strings.Replace(fmt.Sprint(dims), " ", "][", -1), dv.Type()) + } + default: + // TODO handle multidimensional + } + } + + values := reflect.MakeSlice(reflect.SliceOf(dtype), len(elems), len(elems)) + for i, e := range elems { + if err := assign(e, values.Index(i)); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + + // TODO handle multidimensional + + switch dv.Kind() { + case reflect.Slice: + dv.Set(values.Slice(0, dims[0])) + case reflect.Array: + for i := 0; i < dims[0]; i++ { + dv.Index(i).Set(values.Index(i)) + } + } + + return nil +} + +// Value implements the driver.Valuer interface. +func (a GenericArray) Value() (driver.Value, error) { + if a.A == nil { + return nil, nil + } + + rv := reflect.ValueOf(a.A) + + switch rv.Kind() { + case reflect.Slice: + if rv.IsNil() { + return nil, nil + } + case reflect.Array: + default: + return nil, fmt.Errorf("pq: Unable to convert %T to array", a.A) + } + + if n := rv.Len(); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 0, 1+2*n) + + b, _, err := appendArray(b, rv, n) + return string(b), err + } + + return "{}", nil +} + +// Int64Array represents a one-dimensional array of the PostgreSQL integer types. +type Int64Array []int64 + +// Scan implements the sql.Scanner interface. 
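GenericArray's scan path can only assign into element types that implement sql.Scanner (see evaluateDestination above), which is exactly what makes NULL-bearing arrays workable. A sketch, again assuming a reachable server:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "host=localhost user=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// []sql.NullInt64 is not in the Array type switch, so pq.Array falls
	// back to GenericArray; sql.NullInt64 implements sql.Scanner, so every
	// element, including NULL, can be assigned.
	var x []sql.NullInt64
	if err := db.QueryRow(`SELECT ARRAY[235, NULL, 401]`).Scan(pq.Array(&x)); err != nil {
		log.Fatal(err)
	}
	fmt.Println(x[1].Valid) // false: the NULL element
}
```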
+func (a *Int64Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int64Array", src) +} + +func (a *Int64Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int64Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int64Array, len(elems)) + for i, v := range elems { + if b[i], err = strconv.ParseInt(string(v), 10, 64); err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int64Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, a[0], 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, a[i], 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// Int32Array represents a one-dimensional array of the PostgreSQL integer types. +type Int32Array []int32 + +// Scan implements the sql.Scanner interface. +func (a *Int32Array) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to Int32Array", src) +} + +func (a *Int32Array) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "Int32Array") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(Int32Array, len(elems)) + for i, v := range elems { + x, err := strconv.ParseInt(string(v), 10, 32) + if err != nil { + return fmt.Errorf("pq: parsing array element index %d: %v", i, err) + } + b[i] = int32(x) + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a Int32Array) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, N bytes of values, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+2*n) + b[0] = '{' + + b = strconv.AppendInt(b, int64(a[0]), 10) + for i := 1; i < n; i++ { + b = append(b, ',') + b = strconv.AppendInt(b, int64(a[i]), 10) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// StringArray represents a one-dimensional array of the PostgreSQL character types. +type StringArray []string + +// Scan implements the sql.Scanner interface. 
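Because Value emits the same text format that Scan parses, the integer array types round-trip without a server, and the StringArray quoting implemented just below escapes double quotes and backslashes. A quick sketch:

```go
package main

import (
	"fmt"

	"github.com/lib/pq"
)

func main() {
	// Value encodes to the backend's text format...
	v, _ := pq.Int64Array{235, 401}.Value()
	fmt.Println(v) // {235,401}

	// ...and Scan accepts that same format back (string or []byte).
	var a pq.Int64Array
	if err := a.Scan("{1,2,3}"); err != nil {
		panic(err)
	}
	fmt.Println(a) // [1 2 3]

	// StringArray (defined below) double-quotes elements, escaping " and \.
	s, _ := pq.StringArray{`a"b`, `c\d`}.Value()
	fmt.Println(s) // {"a\"b","c\\d"}
}
```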
+func (a *StringArray) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + return a.scanBytes(src) + case string: + return a.scanBytes([]byte(src)) + case nil: + *a = nil + return nil + } + + return fmt.Errorf("pq: cannot convert %T to StringArray", src) +} + +func (a *StringArray) scanBytes(src []byte) error { + elems, err := scanLinearArray(src, []byte{','}, "StringArray") + if err != nil { + return err + } + if *a != nil && len(elems) == 0 { + *a = (*a)[:0] + } else { + b := make(StringArray, len(elems)) + for i, v := range elems { + if b[i] = string(v); v == nil { + return fmt.Errorf("pq: parsing array element index %d: cannot convert nil to string", i) + } + } + *a = b + } + return nil +} + +// Value implements the driver.Valuer interface. +func (a StringArray) Value() (driver.Value, error) { + if a == nil { + return nil, nil + } + + if n := len(a); n > 0 { + // There will be at least two curly brackets, 2*N bytes of quotes, + // and N-1 bytes of delimiters. + b := make([]byte, 1, 1+3*n) + b[0] = '{' + + b = appendArrayQuotedBytes(b, []byte(a[0])) + for i := 1; i < n; i++ { + b = append(b, ',') + b = appendArrayQuotedBytes(b, []byte(a[i])) + } + + return string(append(b, '}')), nil + } + + return "{}", nil +} + +// appendArray appends rv to the buffer, returning the extended buffer and +// the delimiter used between elements. +// +// It panics when n <= 0 or rv's Kind is not reflect.Array nor reflect.Slice. +func appendArray(b []byte, rv reflect.Value, n int) ([]byte, string, error) { + var del string + var err error + + b = append(b, '{') + + if b, del, err = appendArrayElement(b, rv.Index(0)); err != nil { + return b, del, err + } + + for i := 1; i < n; i++ { + b = append(b, del...) + if b, del, err = appendArrayElement(b, rv.Index(i)); err != nil { + return b, del, err + } + } + + return append(b, '}'), del, nil +} + +// appendArrayElement appends rv to the buffer, returning the extended buffer +// and the delimiter to use before the next element. +// +// When rv's Kind is neither reflect.Array nor reflect.Slice, it is converted +// using driver.DefaultParameterConverter and the resulting []byte or string +// is double-quoted. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func appendArrayElement(b []byte, rv reflect.Value) ([]byte, string, error) { + if k := rv.Kind(); k == reflect.Array || k == reflect.Slice { + if t := rv.Type(); t != typeByteSlice && !t.Implements(typeDriverValuer) { + if n := rv.Len(); n > 0 { + return appendArray(b, rv, n) + } + + return b, "", nil + } + } + + var del = "," + var err error + var iv interface{} = rv.Interface() + + if ad, ok := iv.(ArrayDelimiter); ok { + del = ad.ArrayDelimiter() + } + + if iv, err = driver.DefaultParameterConverter.ConvertValue(iv); err != nil { + return b, del, err + } + + switch v := iv.(type) { + case nil: + return append(b, "NULL"...), del, nil + case []byte: + return appendArrayQuotedBytes(b, v), del, nil + case string: + return appendArrayQuotedBytes(b, []byte(v)), del, nil + } + + b, err = appendValue(b, iv) + return b, del, err +} + +func appendArrayQuotedBytes(b, v []byte) []byte { + b = append(b, '"') + for { + i := bytes.IndexAny(v, `"\`) + if i < 0 { + b = append(b, v...) + break + } + if i > 0 { + b = append(b, v[:i]...) 
+ } + b = append(b, '\\', v[i]) + v = v[i+1:] + } + return append(b, '"') +} + +func appendValue(b []byte, v driver.Value) ([]byte, error) { + return append(b, encode(nil, v, 0)...), nil +} + +// parseArray extracts the dimensions and elements of an array represented in +// text format. Only representations emitted by the backend are supported. +// Notably, whitespace around brackets and delimiters is significant, and NULL +// is case-sensitive. +// +// See http://www.postgresql.org/docs/current/static/arrays.html#ARRAYS-IO +func parseArray(src, del []byte) (dims []int, elems [][]byte, err error) { + var depth, i int + + if len(src) < 1 || src[0] != '{' { + return nil, nil, fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '{', 0) + } + +Open: + for i < len(src) { + switch src[i] { + case '{': + depth++ + i++ + case '}': + elems = make([][]byte, 0) + goto Close + default: + break Open + } + } + dims = make([]int, i) + +Element: + for i < len(src) { + switch src[i] { + case '{': + if depth == len(dims) { + break Element + } + depth++ + dims[depth-1] = 0 + i++ + case '"': + var elem = []byte{} + var escape bool + for i++; i < len(src); i++ { + if escape { + elem = append(elem, src[i]) + escape = false + } else { + switch src[i] { + default: + elem = append(elem, src[i]) + case '\\': + escape = true + case '"': + elems = append(elems, elem) + i++ + break Element + } + } + } + default: + for start := i; i < len(src); i++ { + if bytes.HasPrefix(src[i:], del) || src[i] == '}' { + elem := src[start:i] + if len(elem) == 0 { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + if bytes.Equal(elem, []byte("NULL")) { + elem = nil + } + elems = append(elems, elem) + break Element + } + } + } + } + + for i < len(src) { + if bytes.HasPrefix(src[i:], del) && depth > 0 { + dims[depth-1]++ + i += len(del) + goto Element + } else if src[i] == '}' && depth > 0 { + dims[depth-1]++ + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + +Close: + for i < len(src) { + if src[i] == '}' && depth > 0 { + depth-- + i++ + } else { + return nil, nil, fmt.Errorf("pq: unable to parse array; unexpected %q at offset %d", src[i], i) + } + } + if depth > 0 { + err = fmt.Errorf("pq: unable to parse array; expected %q at offset %d", '}', i) + } + if err == nil { + for _, d := range dims { + if (len(elems) % d) != 0 { + err = fmt.Errorf("pq: multidimensional arrays must have elements with matching dimensions") + } + } + } + return +} + +func scanLinearArray(src, del []byte, typ string) (elems [][]byte, err error) { + dims, elems, err := parseArray(src, del) + if err != nil { + return nil, err + } + if len(dims) > 1 { + return nil, fmt.Errorf("pq: cannot convert ARRAY%s to %s", strings.Replace(fmt.Sprint(dims), " ", "][", -1), typ) + } + return elems, err +} diff --git a/vendor/github.com/lib/pq/buf.go b/vendor/github.com/lib/pq/buf.go new file mode 100644 index 0000000000..4b0a0a8f7e --- /dev/null +++ b/vendor/github.com/lib/pq/buf.go @@ -0,0 +1,91 @@ +package pq + +import ( + "bytes" + "encoding/binary" + + "github.com/lib/pq/oid" +) + +type readBuf []byte + +func (b *readBuf) int32() (n int) { + n = int(int32(binary.BigEndian.Uint32(*b))) + *b = (*b)[4:] + return +} + +func (b *readBuf) oid() (n oid.Oid) { + n = oid.Oid(binary.BigEndian.Uint32(*b)) + *b = (*b)[4:] + return +} + +// N.B: this is actually an unsigned 16-bit integer, unlike int32 +func 
(b *readBuf) int16() (n int) { + n = int(binary.BigEndian.Uint16(*b)) + *b = (*b)[2:] + return +} + +func (b *readBuf) string() string { + i := bytes.IndexByte(*b, 0) + if i < 0 { + errorf("invalid message format; expected string terminator") + } + s := (*b)[:i] + *b = (*b)[i+1:] + return string(s) +} + +func (b *readBuf) next(n int) (v []byte) { + v = (*b)[:n] + *b = (*b)[n:] + return +} + +func (b *readBuf) byte() byte { + return b.next(1)[0] +} + +type writeBuf struct { + buf []byte + pos int +} + +func (b *writeBuf) int32(n int) { + x := make([]byte, 4) + binary.BigEndian.PutUint32(x, uint32(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) int16(n int) { + x := make([]byte, 2) + binary.BigEndian.PutUint16(x, uint16(n)) + b.buf = append(b.buf, x...) +} + +func (b *writeBuf) string(s string) { + b.buf = append(append(b.buf, s...), '\000') +} + +func (b *writeBuf) byte(c byte) { + b.buf = append(b.buf, c) +} + +func (b *writeBuf) bytes(v []byte) { + b.buf = append(b.buf, v...) +} + +func (b *writeBuf) wrap() []byte { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + return b.buf +} + +func (b *writeBuf) next(c byte) { + p := b.buf[b.pos:] + binary.BigEndian.PutUint32(p, uint32(len(p))) + b.pos = len(b.buf) + 1 + b.buf = append(b.buf, c, 0, 0, 0, 0) +} diff --git a/vendor/github.com/lib/pq/conn.go b/vendor/github.com/lib/pq/conn.go new file mode 100644 index 0000000000..e050d53586 --- /dev/null +++ b/vendor/github.com/lib/pq/conn.go @@ -0,0 +1,2060 @@ +package pq + +import ( + "bufio" + "context" + "crypto/md5" + "crypto/sha256" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "net" + "os" + "os/user" + "path" + "path/filepath" + "strconv" + "strings" + "sync" + "time" + "unicode" + + "github.com/lib/pq/oid" + "github.com/lib/pq/scram" +) + +// Common error types +var ( + ErrNotSupported = errors.New("pq: Unsupported command") + ErrInFailedTransaction = errors.New("pq: Could not complete operation in a failed transaction") + ErrSSLNotSupported = errors.New("pq: SSL is not enabled on the server") + ErrSSLKeyHasWorldPermissions = errors.New("pq: Private key file has group or world access. Permissions should be u=rw (0600) or less") + ErrCouldNotDetectUsername = errors.New("pq: Could not detect default username. Please provide one explicitly") + + errUnexpectedReady = errors.New("unexpected ReadyForQuery") + errNoRowsAffected = errors.New("no RowsAffected available after the empty statement") + errNoLastInsertID = errors.New("no LastInsertId available after the empty statement") +) + +// Compile time validation that our types implement the expected interfaces +var ( + _ driver.Driver = Driver{} +) + +// Driver is the Postgres database driver. +type Driver struct{} + +// Open opens a new connection to the database. name is a connection string. +// Most users should only use it through database/sql package from the standard +// library. 
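buf.go above is the whole story of the driver's wire framing: big-endian integers, NUL-terminated strings, and a 5-byte header whose length field counts itself but not the leading type byte. A standalone sketch of that framing using only encoding/binary (illustrative only; the real code paths are writeBuf.wrap and conn.send):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// frameQuery builds a simple-query ('Q') message the way writeBuf does:
// a type byte, then a big-endian uint32 length covering the length field
// itself plus the payload, here a NUL-terminated query string.
func frameQuery(q string) []byte {
	payload := append([]byte(q), 0)
	msg := make([]byte, 5, 5+len(payload))
	msg[0] = 'Q'
	binary.BigEndian.PutUint32(msg[1:5], uint32(4+len(payload)))
	return append(msg, payload...)
}

func main() {
	fmt.Printf("% x\n", frameQuery("SELECT 1"))
	// 51 00 00 00 0d 53 45 4c 45 43 54 20 31 00
}
```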
+func (d Driver) Open(name string) (driver.Conn, error) { + return Open(name) +} + +func init() { + sql.Register("postgres", &Driver{}) +} + +type parameterStatus struct { + // server version in the same format as server_version_num, or 0 if + // unavailable + serverVersion int + + // the current location based on the TimeZone value of the session, if + // available + currentLocation *time.Location +} + +type transactionStatus byte + +const ( + txnStatusIdle transactionStatus = 'I' + txnStatusIdleInTransaction transactionStatus = 'T' + txnStatusInFailedTransaction transactionStatus = 'E' +) + +func (s transactionStatus) String() string { + switch s { + case txnStatusIdle: + return "idle" + case txnStatusIdleInTransaction: + return "idle in transaction" + case txnStatusInFailedTransaction: + return "in a failed transaction" + default: + errorf("unknown transactionStatus %d", s) + } + + panic("not reached") +} + +// Dialer is the dialer interface. It can be used to obtain more control over +// how pq creates network connections. +type Dialer interface { + Dial(network, address string) (net.Conn, error) + DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) +} + +// DialerContext is the context-aware dialer interface. +type DialerContext interface { + DialContext(ctx context.Context, network, address string) (net.Conn, error) +} + +type defaultDialer struct { + d net.Dialer +} + +func (d defaultDialer) Dial(network, address string) (net.Conn, error) { + return d.d.Dial(network, address) +} +func (d defaultDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) { + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + return d.DialContext(ctx, network, address) +} +func (d defaultDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) { + return d.d.DialContext(ctx, network, address) +} + +type conn struct { + c net.Conn + buf *bufio.Reader + namei int + scratch [512]byte + txnStatus transactionStatus + txnFinish func() + + // Save connection arguments to use during CancelRequest. + dialer Dialer + opts values + + // Cancellation key data for use with CancelRequest messages. + processID int + secretKey int + + parameterStatus parameterStatus + + saveMessageType byte + saveMessageBuffer []byte + + // If an error is set, this connection is bad and all public-facing + // functions should return the appropriate error by calling get() + // (ErrBadConn) or getForNext(). + err syncErr + + // If set, this connection should never use the binary format when + // receiving query results from prepared statements. Only provided for + // debugging. + disablePreparedBinaryResult bool + + // Whether to always send []byte parameters over as binary. Enables single + // round-trip mode for non-prepared Query calls. + binaryParameters bool + + // If true this connection is in the middle of a COPY + inCopy bool + + // If not nil, notices will be synchronously sent here + noticeHandler func(*Error) + + // If not nil, notifications will be synchronously sent here + notificationHandler func(*Notification) + + // GSSAPI context + gss GSS +} + +type syncErr struct { + err error + sync.Mutex +} + +// Return ErrBadConn if connection is bad. +func (e *syncErr) get() error { + e.Lock() + defer e.Unlock() + if e.err != nil { + return driver.ErrBadConn + } + return nil +} + +// Return the error set on the connection. Currently only used by rows.Next. 
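Because init above registers the driver under the name "postgres", most callers never touch Driver or conn directly; a blank import is enough:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq" // init() runs sql.Register("postgres", &Driver{})
)

func main() {
	// sql.Open only records the DSN; the dial and startup handshake
	// happen lazily, on first use.
	db, err := sql.Open("postgres", "host=localhost user=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	if err := db.Ping(); err != nil {
		log.Fatal(err) // no server reachable, bad DSN, auth failure, ...
	}
}
```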
+func (e *syncErr) getForNext() error { + e.Lock() + defer e.Unlock() + return e.err +} + +// Set error, only if it isn't set yet. +func (e *syncErr) set(err error) { + if err == nil { + panic("attempt to set nil err") + } + e.Lock() + defer e.Unlock() + if e.err == nil { + e.err = err + } +} + +// Handle driver-side settings in parsed connection string. +func (cn *conn) handleDriverSettings(o values) (err error) { + boolSetting := func(key string, val *bool) error { + if value, ok := o[key]; ok { + if value == "yes" { + *val = true + } else if value == "no" { + *val = false + } else { + return fmt.Errorf("unrecognized value %q for %s", value, key) + } + } + return nil + } + + err = boolSetting("disable_prepared_binary_result", &cn.disablePreparedBinaryResult) + if err != nil { + return err + } + return boolSetting("binary_parameters", &cn.binaryParameters) +} + +func (cn *conn) handlePgpass(o values) { + // if a password was supplied, do not process .pgpass + if _, ok := o["password"]; ok { + return + } + filename := os.Getenv("PGPASSFILE") + if filename == "" { + // XXX this code doesn't work on Windows where the default filename is + // XXX %APPDATA%\postgresql\pgpass.conf + // Prefer $HOME over user.Current due to glibc bug: golang.org/issue/13470 + userHome := os.Getenv("HOME") + if userHome == "" { + user, err := user.Current() + if err != nil { + return + } + userHome = user.HomeDir + } + filename = filepath.Join(userHome, ".pgpass") + } + fileinfo, err := os.Stat(filename) + if err != nil { + return + } + mode := fileinfo.Mode() + if mode&(0x77) != 0 { + // XXX should warn about incorrect .pgpass permissions as psql does + return + } + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + scanner := bufio.NewScanner(io.Reader(file)) + hostname := o["host"] + ntw, _ := network(o) + port := o["port"] + db := o["dbname"] + username := o["user"] + // From: https://github.com/tg/pgpass/blob/master/reader.go + getFields := func(s string) []string { + fs := make([]string, 0, 5) + f := make([]rune, 0, len(s)) + + var esc bool + for _, c := range s { + switch { + case esc: + f = append(f, c) + esc = false + case c == '\\': + esc = true + case c == ':': + fs = append(fs, string(f)) + f = f[:0] + default: + f = append(f, c) + } + } + return append(fs, string(f)) + } + for scanner.Scan() { + line := scanner.Text() + if len(line) == 0 || line[0] == '#' { + continue + } + split := getFields(line) + if len(split) != 5 { + continue + } + if (split[0] == "*" || split[0] == hostname || (split[0] == "localhost" && (hostname == "" || ntw == "unix"))) && (split[1] == "*" || split[1] == port) && (split[2] == "*" || split[2] == db) && (split[3] == "*" || split[3] == username) { + o["password"] = split[4] + return + } + } +} + +func (cn *conn) writeBuf(b byte) *writeBuf { + cn.scratch[0] = b + return &writeBuf{ + buf: cn.scratch[:5], + pos: 1, + } +} + +// Open opens a new connection to the database. dsn is a connection string. +// Most users should only use it through database/sql package from the standard +// library. +func Open(dsn string) (_ driver.Conn, err error) { + return DialOpen(defaultDialer{}, dsn) +} + +// DialOpen opens a new connection to the database using a dialer. 
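handleDriverSettings accepts only the literal strings "yes" and "no" for the two driver-side booleans, and those keys never reach the server (isDriverSetting, further down, filters them out of the startup packet). A sketch of setting one in the DSN:

```go
package main

import (
	"database/sql"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	// binary_parameters=yes sends []byte parameters in binary and enables
	// single round-trip mode for non-prepared parameterized queries;
	// any value other than yes/no fails with "unrecognized value".
	dsn := "host=localhost user=postgres sslmode=disable binary_parameters=yes"
	db, err := sql.Open("postgres", dsn)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```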
+func DialOpen(d Dialer, dsn string) (_ driver.Conn, err error) { + c, err := NewConnector(dsn) + if err != nil { + return nil, err + } + c.dialer = d + return c.open(context.Background()) +} + +func (c *Connector) open(ctx context.Context) (cn *conn, err error) { + // Handle any panics during connection initialization. Note that we + // specifically do *not* want to use errRecover(), as that would turn any + // connection errors into ErrBadConns, hiding the real error message from + // the user. + defer errRecoverNoErrBadConn(&err) + + // Create a new values map (copy). This makes it so maps in different + // connections do not reference the same underlying data structure, so it + // is safe for multiple connections to concurrently write to their opts. + o := make(values) + for k, v := range c.opts { + o[k] = v + } + + cn = &conn{ + opts: o, + dialer: c.dialer, + } + err = cn.handleDriverSettings(o) + if err != nil { + return nil, err + } + cn.handlePgpass(o) + + cn.c, err = dial(ctx, c.dialer, o) + if err != nil { + return nil, err + } + + err = cn.ssl(o) + if err != nil { + if cn.c != nil { + cn.c.Close() + } + return nil, err + } + + // cn.startup panics on error. Make sure we don't leak cn.c. + panicking := true + defer func() { + if panicking { + cn.c.Close() + } + }() + + cn.buf = bufio.NewReader(cn.c) + cn.startup(o) + + // reset the deadline, in case one was set (see dial) + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + err = cn.c.SetDeadline(time.Time{}) + } + panicking = false + return cn, err +} + +func dial(ctx context.Context, d Dialer, o values) (net.Conn, error) { + network, address := network(o) + + // Zero or not specified means wait indefinitely. + if timeout, ok := o["connect_timeout"]; ok && timeout != "0" { + seconds, err := strconv.ParseInt(timeout, 10, 0) + if err != nil { + return nil, fmt.Errorf("invalid value for parameter connect_timeout: %s", err) + } + duration := time.Duration(seconds) * time.Second + + // connect_timeout should apply to the entire connection establishment + // procedure, so we both use a timeout for the TCP connection + // establishment and set a deadline for doing the initial handshake. + // The deadline is then reset after startup() is done. + deadline := time.Now().Add(duration) + var conn net.Conn + if dctx, ok := d.(DialerContext); ok { + ctx, cancel := context.WithTimeout(ctx, duration) + defer cancel() + conn, err = dctx.DialContext(ctx, network, address) + } else { + conn, err = d.DialTimeout(network, address, duration) + } + if err != nil { + return nil, err + } + err = conn.SetDeadline(deadline) + return conn, err + } + if dctx, ok := d.(DialerContext); ok { + return dctx.DialContext(ctx, network, address) + } + return d.Dial(network, address) +} + +func network(o values) (string, string) { + host := o["host"] + + if strings.HasPrefix(host, "/") { + sockPath := path.Join(host, ".s.PGSQL."+o["port"]) + return "unix", sockPath + } + + return "tcp", net.JoinHostPort(host, o["port"]) +} + +type values map[string]string + +// scanner implements a tokenizer for libpq-style option strings. +type scanner struct { + s []rune + i int +} + +// newScanner returns a new scanner initialized with the option string s. +func newScanner(s string) *scanner { + return &scanner{[]rune(s), 0} +} + +// Next returns the next rune. +// It returns 0, false if the end of the text has been reached. 
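DialOpen is the hook for controlling how the underlying TCP or unix-socket connection is made; dial below prefers DialContext when the dialer provides it, otherwise falling back to Dial/DialTimeout. A sketch with a hypothetical logging dialer:

```go
package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/lib/pq"
)

// loggingDialer is a hypothetical Dialer that records each dial attempt.
type loggingDialer struct{ d net.Dialer }

func (ld loggingDialer) Dial(network, address string) (net.Conn, error) {
	log.Printf("pq dialing %s %s", network, address)
	return ld.d.Dial(network, address)
}

func (ld loggingDialer) DialTimeout(network, address string, timeout time.Duration) (net.Conn, error) {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	log.Printf("pq dialing %s %s (timeout %s)", network, address, timeout)
	return ld.d.DialContext(ctx, network, address)
}

func main() {
	cn, err := pq.DialOpen(loggingDialer{}, "host=localhost user=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer cn.Close()
}
```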
+func (s *scanner) Next() (rune, bool) { + if s.i >= len(s.s) { + return 0, false + } + r := s.s[s.i] + s.i++ + return r, true +} + +// SkipSpaces returns the next non-whitespace rune. +// It returns 0, false if the end of the text has been reached. +func (s *scanner) SkipSpaces() (rune, bool) { + r, ok := s.Next() + for unicode.IsSpace(r) && ok { + r, ok = s.Next() + } + return r, ok +} + +// parseOpts parses the options from name and adds them to the values. +// +// The parsing code is based on conninfo_parse from libpq's fe-connect.c +func parseOpts(name string, o values) error { + s := newScanner(name) + + for { + var ( + keyRunes, valRunes []rune + r rune + ok bool + ) + + if r, ok = s.SkipSpaces(); !ok { + break + } + + // Scan the key + for !unicode.IsSpace(r) && r != '=' { + keyRunes = append(keyRunes, r) + if r, ok = s.Next(); !ok { + break + } + } + + // Skip any whitespace if we're not at the = yet + if r != '=' { + r, ok = s.SkipSpaces() + } + + // The current character should be = + if r != '=' || !ok { + return fmt.Errorf(`missing "=" after %q in connection info string"`, string(keyRunes)) + } + + // Skip any whitespace after the = + if r, ok = s.SkipSpaces(); !ok { + // If we reach the end here, the last value is just an empty string as per libpq. + o[string(keyRunes)] = "" + break + } + + if r != '\'' { + for !unicode.IsSpace(r) { + if r == '\\' { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`missing character after backslash`) + } + } + valRunes = append(valRunes, r) + + if r, ok = s.Next(); !ok { + break + } + } + } else { + quote: + for { + if r, ok = s.Next(); !ok { + return fmt.Errorf(`unterminated quoted string literal in connection string`) + } + switch r { + case '\'': + break quote + case '\\': + r, _ = s.Next() + fallthrough + default: + valRunes = append(valRunes, r) + } + } + } + + o[string(keyRunes)] = string(valRunes) + } + + return nil +} + +func (cn *conn) isInTransaction() bool { + return cn.txnStatus == txnStatusIdleInTransaction || + cn.txnStatus == txnStatusInFailedTransaction +} + +func (cn *conn) checkIsInTransaction(intxn bool) { + if cn.isInTransaction() != intxn { + cn.err.set(driver.ErrBadConn) + errorf("unexpected transaction status %v", cn.txnStatus) + } +} + +func (cn *conn) Begin() (_ driver.Tx, err error) { + return cn.begin("") +} + +func (cn *conn) begin(mode string) (_ driver.Tx, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(false) + _, commandTag, err := cn.simpleExec("BEGIN" + mode) + if err != nil { + return nil, err + } + if commandTag != "BEGIN" { + cn.err.set(driver.ErrBadConn) + return nil, fmt.Errorf("unexpected command tag %s", commandTag) + } + if cn.txnStatus != txnStatusIdleInTransaction { + cn.err.set(driver.ErrBadConn) + return nil, fmt.Errorf("unexpected transaction status %v", cn.txnStatus) + } + return cn, nil +} + +func (cn *conn) closeTxn() { + if finish := cn.txnFinish; finish != nil { + finish() + } +} + +func (cn *conn) Commit() (err error) { + defer cn.closeTxn() + if err := cn.err.get(); err != nil { + return err + } + defer cn.errRecover(&err) + + cn.checkIsInTransaction(true) + // We don't want the client to think that everything is okay if it tries + // to commit a failed transaction. However, no matter what we return, + // database/sql will release this connection back into the free connection + // pool so we have to abort the current transaction here. 
Note that you + // would get the same behaviour if you issued a COMMIT in a failed + // transaction, so it's also the least surprising thing to do here. + if cn.txnStatus == txnStatusInFailedTransaction { + if err := cn.rollback(); err != nil { + return err + } + return ErrInFailedTransaction + } + + _, commandTag, err := cn.simpleExec("COMMIT") + if err != nil { + if cn.isInTransaction() { + cn.err.set(driver.ErrBadConn) + } + return err + } + if commandTag != "COMMIT" { + cn.err.set(driver.ErrBadConn) + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) Rollback() (err error) { + defer cn.closeTxn() + if err := cn.err.get(); err != nil { + return err + } + defer cn.errRecover(&err) + return cn.rollback() +} + +func (cn *conn) rollback() (err error) { + cn.checkIsInTransaction(true) + _, commandTag, err := cn.simpleExec("ROLLBACK") + if err != nil { + if cn.isInTransaction() { + cn.err.set(driver.ErrBadConn) + } + return err + } + if commandTag != "ROLLBACK" { + return fmt.Errorf("unexpected command tag %s", commandTag) + } + cn.checkIsInTransaction(false) + return nil +} + +func (cn *conn) gname() string { + cn.namei++ + return strconv.FormatInt(int64(cn.namei), 10) +} + +func (cn *conn) simpleExec(q string) (res driver.Result, commandTag string, err error) { + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C': + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + // done + return + case 'E': + err = parseError(r) + case 'I': + res = emptyRows + case 'T', 'D': + // ignore any results + default: + cn.err.set(driver.ErrBadConn) + errorf("unknown response for simple query: %q", t) + } + } +} + +func (cn *conn) simpleQuery(q string) (res *rows, err error) { + defer cn.errRecover(&err) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'C', 'I': + // We allow queries which don't return any results through Query as + // well as Exec. We still have to give database/sql a rows object + // the user can close, though, to avoid connections from being + // leaked. A "rows" with done=true works fine for that purpose. + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected message %q in simple query execution", t) + } + if res == nil { + res = &rows{ + cn: cn, + } + } + // Set the result and tag to the last command complete if there wasn't a + // query already run. Although queries usually return from here and cede + // control to Next, a query with zero results does not. + if t == 'C' { + res.result, res.tag = cn.parseComplete(r.string()) + if res.colNames != nil { + return + } + } + res.done = true + case 'Z': + cn.processReadyForQuery(r) + // done + return + case 'E': + res = nil + err = parseError(r) + case 'D': + if res == nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected DataRow in simple query execution") + } + // the query didn't fail; kick off to Next + cn.saveMessage(t, r) + return + case 'T': + // res might be non-nil here if we received a previous + // CommandComplete, but that's fine; just overwrite it + res = &rows{cn: cn} + res.rowsHeader = parsePortalRowDescribe(r) + + // To work around a bug in QueryRow in Go 1.2 and earlier, wait + // until the first DataRow has been received. 
+ default: + cn.err.set(driver.ErrBadConn) + errorf("unknown response for simple query: %q", t) + } + } +} + +type noRows struct{} + +var emptyRows noRows + +var _ driver.Result = noRows{} + +func (noRows) LastInsertId() (int64, error) { + return 0, errNoLastInsertID +} + +func (noRows) RowsAffected() (int64, error) { + return 0, errNoRowsAffected +} + +// Decides which column formats to use for a prepared statement. The input is +// an array of type oids, one element per result column. +func decideColumnFormats(colTyps []fieldDesc, forceText bool) (colFmts []format, colFmtData []byte) { + if len(colTyps) == 0 { + return nil, colFmtDataAllText + } + + colFmts = make([]format, len(colTyps)) + if forceText { + return colFmts, colFmtDataAllText + } + + allBinary := true + allText := true + for i, t := range colTyps { + switch t.OID { + // This is the list of types to use binary mode for when receiving them + // through a prepared statement. If a type appears in this list, it + // must also be implemented in binaryDecode in encode.go. + case oid.T_bytea: + fallthrough + case oid.T_int8: + fallthrough + case oid.T_int4: + fallthrough + case oid.T_int2: + fallthrough + case oid.T_uuid: + colFmts[i] = formatBinary + allText = false + + default: + allBinary = false + } + } + + if allBinary { + return colFmts, colFmtDataAllBinary + } else if allText { + return colFmts, colFmtDataAllText + } else { + colFmtData = make([]byte, 2+len(colFmts)*2) + binary.BigEndian.PutUint16(colFmtData, uint16(len(colFmts))) + for i, v := range colFmts { + binary.BigEndian.PutUint16(colFmtData[2+i*2:], uint16(v)) + } + return colFmts, colFmtData + } +} + +func (cn *conn) prepareTo(q, stmtName string) *stmt { + st := &stmt{cn: cn, name: stmtName} + + b := cn.writeBuf('P') + b.string(st.name) + b.string(q) + b.int16(0) + + b.next('D') + b.byte('S') + b.string(st.name) + + b.next('S') + cn.send(b) + + cn.readParseResponse() + st.paramTyps, st.colNames, st.colTyps = cn.readStatementDescribeResponse() + st.colFmts, st.colFmtData = decideColumnFormats(st.colTyps, cn.disablePreparedBinaryResult) + cn.readReadyForQuery() + return st +} + +func (cn *conn) Prepare(q string) (_ driver.Stmt, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + if len(q) >= 4 && strings.EqualFold(q[:4], "COPY") { + s, err := cn.prepareCopyIn(q) + if err == nil { + cn.inCopy = true + } + return s, err + } + return cn.prepareTo(q, cn.gname()), nil +} + +func (cn *conn) Close() (err error) { + // Skip cn.bad return here because we always want to close a connection. + defer cn.errRecover(&err) + + // Ensure that cn.c.Close is always run. Since error handling is done with + // panics and cn.errRecover, the Close must be in a defer. + defer func() { + cerr := cn.c.Close() + if err == nil { + err = cerr + } + }() + + // Don't go through send(); ListenerConn relies on us not scribbling on the + // scratch buffer of this connection. 
+ return cn.sendSimpleMessage('X') +} + +// Implement the "Queryer" interface +func (cn *conn) Query(query string, args []driver.Value) (driver.Rows, error) { + return cn.query(query, args) +} + +func (cn *conn) query(query string, args []driver.Value) (_ *rows, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + if cn.inCopy { + return nil, errCopyInProgress + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleQuery" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + return cn.simpleQuery(query) + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + rows := &rows{cn: cn} + rows.rowsHeader = cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + return rows, nil + } + st := cn.prepareTo(query, "") + st.exec(args) + return &rows{ + cn: cn, + rowsHeader: st.rowsHeader, + }, nil +} + +// Implement the optional "Execer" interface for one-shot queries +func (cn *conn) Exec(query string, args []driver.Value) (res driver.Result, err error) { + if err := cn.err.get(); err != nil { + return nil, err + } + defer cn.errRecover(&err) + + // Check to see if we can use the "simpleExec" interface, which is + // *much* faster than going through prepare/exec + if len(args) == 0 { + // ignore commandTag, our caller doesn't care + r, _, err := cn.simpleExec(query) + return r, err + } + + if cn.binaryParameters { + cn.sendBinaryModeQuery(query, args) + + cn.readParseResponse() + cn.readBindResponse() + cn.readPortalDescribeResponse() + cn.postExecuteWorkaround() + res, _, err = cn.readExecuteResponse("Execute") + return res, err + } + // Use the unnamed statement to defer planning until bind + // time, or else value-based selectivity estimates cannot be + // used. + st := cn.prepareTo(query, "") + r, err := st.Exec(args) + if err != nil { + panic(err) + } + return r, err +} + +type safeRetryError struct { + Err error +} + +func (se *safeRetryError) Error() string { + return se.Err.Error() +} + +func (cn *conn) send(m *writeBuf) { + n, err := cn.c.Write(m.wrap()) + if err != nil { + if n == 0 { + err = &safeRetryError{Err: err} + } + panic(err) + } +} + +func (cn *conn) sendStartupPacket(m *writeBuf) error { + _, err := cn.c.Write((m.wrap())[1:]) + return err +} + +// Send a message of type typ to the server on the other end of cn. The +// message should have no payload. This method does not use the scratch +// buffer. +func (cn *conn) sendSimpleMessage(typ byte) (err error) { + _, err = cn.c.Write([]byte{typ, '\x00', '\x00', '\x00', '\x04'}) + return err +} + +// saveMessage memorizes a message and its buffer in the conn struct. +// recvMessage will then return these values on the next call to it. This +// method is useful in cases where you have to see what the next message is +// going to be (e.g. to see whether it's an error or not) but you can't handle +// the message yourself. +func (cn *conn) saveMessage(typ byte, buf *readBuf) { + if cn.saveMessageType != 0 { + cn.err.set(driver.ErrBadConn) + errorf("unexpected saveMessageType %d", cn.saveMessageType) + } + cn.saveMessageType = typ + cn.saveMessageBuffer = *buf +} + +// recvMessage receives any message from the backend, or returns an error if +// a problem occurred while reading the message. 
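As the Queryer/Execer implementations above show, zero-argument calls take the simpleQuery/simpleExec ('Q') fast path, while parameterized calls go through an unnamed-statement Parse/Bind/Execute exchange (collapsed to one round trip when binary_parameters=yes). A runnable sketch, assuming a reachable server:

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	_ "github.com/lib/pq"
)

func main() {
	db, err := sql.Open("postgres", "host=localhost user=postgres sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	var a, b int
	// No parameters: one simple-query round trip.
	if err := db.QueryRow(`SELECT 1`).Scan(&a); err != nil {
		log.Fatal(err)
	}
	// With parameters: the unnamed statement defers planning to bind
	// time, so value-based selectivity estimates can be used.
	if err := db.QueryRow(`SELECT $1::int`, 2).Scan(&b); err != nil {
		log.Fatal(err)
	}
	fmt.Println(a, b) // 1 2
}
```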
+func (cn *conn) recvMessage(r *readBuf) (byte, error) { + // workaround for a QueryRow bug, see exec + if cn.saveMessageType != 0 { + t := cn.saveMessageType + *r = cn.saveMessageBuffer + cn.saveMessageType = 0 + cn.saveMessageBuffer = nil + return t, nil + } + + x := cn.scratch[:5] + _, err := io.ReadFull(cn.buf, x) + if err != nil { + return 0, err + } + + // read the type and length of the message that follows + t := x[0] + n := int(binary.BigEndian.Uint32(x[1:])) - 4 + var y []byte + if n <= len(cn.scratch) { + y = cn.scratch[:n] + } else { + y = make([]byte, n) + } + _, err = io.ReadFull(cn.buf, y) + if err != nil { + return 0, err + } + *r = y + return t, nil +} + +// recv receives a message from the backend, but if an error happened while +// reading the message or the received message was an ErrorResponse, it panics. +// NoticeResponses are ignored. This function should generally be used only +// during the startup sequence. +func (cn *conn) recv() (t byte, r *readBuf) { + for { + var err error + r = &readBuf{} + t, err = cn.recvMessage(r) + if err != nil { + panic(err) + } + switch t { + case 'E': + panic(parseError(r)) + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + default: + return + } + } +} + +// recv1Buf is exactly equivalent to recv1, except it uses a buffer supplied by +// the caller to avoid an allocation. +func (cn *conn) recv1Buf(r *readBuf) byte { + for { + t, err := cn.recvMessage(r) + if err != nil { + panic(err) + } + + switch t { + case 'A': + if n := cn.notificationHandler; n != nil { + n(recvNotification(r)) + } + case 'N': + if n := cn.noticeHandler; n != nil { + n(parseError(r)) + } + case 'S': + cn.processParameterStatus(r) + default: + return t + } + } +} + +// recv1 receives a message from the backend, panicking if an error occurs +// while attempting to read it. All asynchronous messages are ignored, with +// the exception of ErrorResponse. +func (cn *conn) recv1() (t byte, r *readBuf) { + r = &readBuf{} + t = cn.recv1Buf(r) + return t, r +} + +func (cn *conn) ssl(o values) error { + upgrade, err := ssl(o) + if err != nil { + return err + } + + if upgrade == nil { + // Nothing to do + return nil + } + + w := cn.writeBuf(0) + w.int32(80877103) + if err = cn.sendStartupPacket(w); err != nil { + return err + } + + b := cn.scratch[:1] + _, err = io.ReadFull(cn.c, b) + if err != nil { + return err + } + + if b[0] != 'S' { + return ErrSSLNotSupported + } + + cn.c, err = upgrade(cn.c) + return err +} + +// isDriverSetting returns true iff a setting is purely for configuring the +// driver's options and should not be sent to the server in the connection +// startup packet. +func isDriverSetting(key string) bool { + switch key { + case "host", "port": + return true + case "password": + return true + case "sslmode", "sslcert", "sslkey", "sslrootcert", "sslinline": + return true + case "fallback_application_name": + return true + case "connect_timeout": + return true + case "disable_prepared_binary_result": + return true + case "binary_parameters": + return true + case "krbsrvname": + return true + case "krbspn": + return true + default: + return false + } +} + +func (cn *conn) startup(o values) { + w := cn.writeBuf(0) + w.int32(196608) + // Send the backend the name of the database we want to connect to, and the + // user we want to connect as. Additionally, we send over any run-time + // parameters potentially included in the connection string. 
If the server + // doesn't recognize any of them, it will reply with an error. + for k, v := range o { + if isDriverSetting(k) { + // skip options which can't be run-time parameters + continue + } + // The protocol requires us to supply the database name as "database" + // instead of "dbname". + if k == "dbname" { + k = "database" + } + w.string(k) + w.string(v) + } + w.string("") + if err := cn.sendStartupPacket(w); err != nil { + panic(err) + } + + for { + t, r := cn.recv() + switch t { + case 'K': + cn.processBackendKeyData(r) + case 'S': + cn.processParameterStatus(r) + case 'R': + cn.auth(r, o) + case 'Z': + cn.processReadyForQuery(r) + return + default: + errorf("unknown response for startup: %q", t) + } + } +} + +func (cn *conn) auth(r *readBuf, o values) { + switch code := r.int32(); code { + case 0: + // OK + case 3: + w := cn.writeBuf('p') + w.string(o["password"]) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 5: + s := string(r.next(4)) + w := cn.writeBuf('p') + w.string("md5" + md5s(md5s(o["password"]+o["user"])+s)) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 0 { + errorf("unexpected authentication response: %q", t) + } + case 7: // GSSAPI, startup + if newGss == nil { + errorf("kerberos error: no GSSAPI provider registered (import github.com/lib/pq/auth/kerberos if you need Kerberos support)") + } + cli, err := newGss() + if err != nil { + errorf("kerberos error: %s", err.Error()) + } + + var token []byte + + if spn, ok := o["krbspn"]; ok { + // Use the supplied SPN if provided.. + token, err = cli.GetInitTokenFromSpn(spn) + } else { + // Allow the kerberos service name to be overridden + service := "postgres" + if val, ok := o["krbsrvname"]; ok { + service = val + } + + token, err = cli.GetInitToken(o["host"], service) + } + + if err != nil { + errorf("failed to get Kerberos ticket: %q", err) + } + + w := cn.writeBuf('p') + w.bytes(token) + cn.send(w) + + // Store for GSSAPI continue message + cn.gss = cli + + case 8: // GSSAPI continue + + if cn.gss == nil { + errorf("GSSAPI protocol error") + } + + b := []byte(*r) + + done, tokOut, err := cn.gss.Continue(b) + if err == nil && !done { + w := cn.writeBuf('p') + w.bytes(tokOut) + cn.send(w) + } + + // Errors fall through and read the more detailed message + // from the server.. 
+ + case 10: + sc := scram.NewClient(sha256.New, o["user"], o["password"]) + sc.Step(nil) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + scOut := sc.Out() + + w := cn.writeBuf('p') + w.string("SCRAM-SHA-256") + w.int32(len(scOut)) + w.bytes(scOut) + cn.send(w) + + t, r := cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 11 { + errorf("unexpected authentication response: %q", t) + } + + nextStep := r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + scOut = sc.Out() + w = cn.writeBuf('p') + w.bytes(scOut) + cn.send(w) + + t, r = cn.recv() + if t != 'R' { + errorf("unexpected password response: %q", t) + } + + if r.int32() != 12 { + errorf("unexpected authentication response: %q", t) + } + + nextStep = r.next(len(*r)) + sc.Step(nextStep) + if sc.Err() != nil { + errorf("SCRAM-SHA-256 error: %s", sc.Err().Error()) + } + + default: + errorf("unknown authentication response: %d", code) + } +} + +type format int + +const formatText format = 0 +const formatBinary format = 1 + +// One result-column format code with the value 1 (i.e. all binary). +var colFmtDataAllBinary = []byte{0, 1, 0, 1} + +// No result-column format codes (i.e. all text). +var colFmtDataAllText = []byte{0, 0} + +type stmt struct { + cn *conn + name string + rowsHeader + colFmtData []byte + paramTyps []oid.Oid + closed bool +} + +func (st *stmt) Close() (err error) { + if st.closed { + return nil + } + if err := st.cn.err.get(); err != nil { + return err + } + defer st.cn.errRecover(&err) + + w := st.cn.writeBuf('C') + w.byte('S') + w.string(st.name) + st.cn.send(w) + + st.cn.send(st.cn.writeBuf('S')) + + t, _ := st.cn.recv1() + if t != '3' { + st.cn.err.set(driver.ErrBadConn) + errorf("unexpected close response: %q", t) + } + st.closed = true + + t, r := st.cn.recv1() + if t != 'Z' { + st.cn.err.set(driver.ErrBadConn) + errorf("expected ready for query, but got: %q", t) + } + st.cn.processReadyForQuery(r) + + return nil +} + +func (st *stmt) Query(v []driver.Value) (r driver.Rows, err error) { + return st.query(v) +} + +func (st *stmt) query(v []driver.Value) (r *rows, err error) { + if err := st.cn.err.get(); err != nil { + return nil, err + } + defer st.cn.errRecover(&err) + + st.exec(v) + return &rows{ + cn: st.cn, + rowsHeader: st.rowsHeader, + }, nil +} + +func (st *stmt) Exec(v []driver.Value) (res driver.Result, err error) { + if err := st.cn.err.get(); err != nil { + return nil, err + } + defer st.cn.errRecover(&err) + + st.exec(v) + res, _, err = st.cn.readExecuteResponse("simple query") + return res, err +} + +func (st *stmt) exec(v []driver.Value) { + if len(v) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(v)) + } + if len(v) != len(st.paramTyps) { + errorf("got %d parameters but the statement requires %d", len(v), len(st.paramTyps)) + } + + cn := st.cn + w := cn.writeBuf('B') + w.byte(0) // unnamed portal + w.string(st.name) + + if cn.binaryParameters { + cn.sendBinaryParameters(w, v) + } else { + w.int16(0) + w.int16(len(v)) + for i, x := range v { + if x == nil { + w.int32(-1) + } else { + b := encode(&cn.parameterStatus, x, st.paramTyps[i]) + w.int32(len(b)) + w.bytes(b) + } + } + } + w.bytes(st.colFmtData) + + w.next('E') + w.byte(0) + w.int32(0) + + w.next('S') + cn.send(w) + + cn.readBindResponse() + cn.postExecuteWorkaround() + +} + +func (st *stmt) NumInput() int { + return len(st.paramTyps) +} + +// 
parseComplete parses the "command tag" from a CommandComplete message, and +// returns the number of rows affected (if applicable) and a string +// identifying only the command that was executed, e.g. "ALTER TABLE". If the +// command tag could not be parsed, parseComplete panics. +func (cn *conn) parseComplete(commandTag string) (driver.Result, string) { + commandsWithAffectedRows := []string{ + "SELECT ", + // INSERT is handled below + "UPDATE ", + "DELETE ", + "FETCH ", + "MOVE ", + "COPY ", + } + + var affectedRows *string + for _, tag := range commandsWithAffectedRows { + if strings.HasPrefix(commandTag, tag) { + t := commandTag[len(tag):] + affectedRows = &t + commandTag = tag[:len(tag)-1] + break + } + } + // INSERT also includes the oid of the inserted row in its command tag. + // Oids in user tables are deprecated, and the oid is only returned when + // exactly one row is inserted, so it's unlikely to be of value to any + // real-world application and we can ignore it. + if affectedRows == nil && strings.HasPrefix(commandTag, "INSERT ") { + parts := strings.Split(commandTag, " ") + if len(parts) != 3 { + cn.err.set(driver.ErrBadConn) + errorf("unexpected INSERT command tag %s", commandTag) + } + affectedRows = &parts[len(parts)-1] + commandTag = "INSERT" + } + // There should be no affected rows attached to the tag, just return it + if affectedRows == nil { + return driver.RowsAffected(0), commandTag + } + n, err := strconv.ParseInt(*affectedRows, 10, 64) + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("could not parse commandTag: %s", err) + } + return driver.RowsAffected(n), commandTag +} + +type rowsHeader struct { + colNames []string + colTyps []fieldDesc + colFmts []format +} + +type rows struct { + cn *conn + finish func() + rowsHeader + done bool + rb readBuf + result driver.Result + tag string + + next *rowsHeader +} + +func (rs *rows) Close() error { + if finish := rs.finish; finish != nil { + defer finish() + } + // no need to look at cn.bad as Next() will + for { + err := rs.Next(nil) + switch err { + case nil: + case io.EOF: + // rs.Next can return io.EOF on both 'Z' (ready for query) and 'T' (row + // description, used with HasNextResultSet). We need to fetch messages until + // we hit a 'Z', which is done by waiting for done to be set. 
+ if rs.done { + return nil + } + default: + return err + } + } +} + +func (rs *rows) Columns() []string { + return rs.colNames +} + +func (rs *rows) Result() driver.Result { + if rs.result == nil { + return emptyRows + } + return rs.result +} + +func (rs *rows) Tag() string { + return rs.tag +} + +func (rs *rows) Next(dest []driver.Value) (err error) { + if rs.done { + return io.EOF + } + + conn := rs.cn + if err := conn.err.getForNext(); err != nil { + return err + } + defer conn.errRecover(&err) + + for { + t := conn.recv1Buf(&rs.rb) + switch t { + case 'E': + err = parseError(&rs.rb) + case 'C', 'I': + if t == 'C' { + rs.result, rs.tag = conn.parseComplete(rs.rb.string()) + } + continue + case 'Z': + conn.processReadyForQuery(&rs.rb) + rs.done = true + if err != nil { + return err + } + return io.EOF + case 'D': + n := rs.rb.int16() + if err != nil { + conn.err.set(driver.ErrBadConn) + errorf("unexpected DataRow after error %s", err) + } + if n < len(dest) { + dest = dest[:n] + } + for i := range dest { + l := rs.rb.int32() + if l == -1 { + dest[i] = nil + continue + } + dest[i] = decode(&conn.parameterStatus, rs.rb.next(l), rs.colTyps[i].OID, rs.colFmts[i]) + } + return + case 'T': + next := parsePortalRowDescribe(&rs.rb) + rs.next = &next + return io.EOF + default: + errorf("unexpected message after execute: %q", t) + } + } +} + +func (rs *rows) HasNextResultSet() bool { + hasNext := rs.next != nil && !rs.done + return hasNext +} + +func (rs *rows) NextResultSet() error { + if rs.next == nil { + return io.EOF + } + rs.rowsHeader = *rs.next + rs.next = nil + return nil +} + +// QuoteIdentifier quotes an "identifier" (e.g. a table or a column name) to be +// used as part of an SQL statement. For example: +// +// tblname := "my_table" +// data := "my_data" +// quoted := pq.QuoteIdentifier(tblname) +// err := db.Exec(fmt.Sprintf("INSERT INTO %s VALUES ($1)", quoted), data) +// +// Any double quotes in name will be escaped. The quoted identifier will be +// case sensitive when used in a query. If the input string contains a zero +// byte, the result will be truncated immediately before it. +func QuoteIdentifier(name string) string { + end := strings.IndexRune(name, 0) + if end > -1 { + name = name[:end] + } + return `"` + strings.Replace(name, `"`, `""`, -1) + `"` +} + +// QuoteLiteral quotes a 'literal' (e.g. a parameter, often used to pass literal +// to DDL and other statements that do not accept parameters) to be used as part +// of an SQL statement. For example: +// +// exp_date := pq.QuoteLiteral("2023-01-05 15:00:00Z") +// err := db.Exec(fmt.Sprintf("CREATE ROLE my_user VALID UNTIL %s", exp_date)) +// +// Any single quotes in name will be escaped. Any backslashes (i.e. "\") will be +// replaced by two backslashes (i.e. "\\") and the C-style escape identifier +// that PostgreSQL provides ('E') will be prepended to the string. +func QuoteLiteral(literal string) string { + // This follows the PostgreSQL internal algorithm for handling quoted literals + // from libpq, which can be found in the "PQEscapeStringInternal" function, + // which is found in the libpq/fe-exec.c source file: + // https://git.postgresql.org/gitweb/?p=postgresql.git;a=blob;f=src/interfaces/libpq/fe-exec.c + // + // substitute any single-quotes (') with two single-quotes ('') + literal = strings.Replace(literal, `'`, `''`, -1) + // determine if the string has any backslashes (\) in it. 
+ // if it does, replace any backslashes (\) with two backslashes (\\) + // then, we need to wrap the entire string with a PostgreSQL + // C-style escape. Per how "PQEscapeStringInternal" handles this case, we + // also add a space before the "E" + if strings.Contains(literal, `\`) { + literal = strings.Replace(literal, `\`, `\\`, -1) + literal = ` E'` + literal + `'` + } else { + // otherwise, we can just wrap the literal with a pair of single quotes + literal = `'` + literal + `'` + } + return literal +} + +func md5s(s string) string { + h := md5.New() + h.Write([]byte(s)) + return fmt.Sprintf("%x", h.Sum(nil)) +} + +func (cn *conn) sendBinaryParameters(b *writeBuf, args []driver.Value) { + // Do one pass over the parameters to see if we're going to send any of + // them over in binary. If we are, create a paramFormats array at the + // same time. + var paramFormats []int + for i, x := range args { + _, ok := x.([]byte) + if ok { + if paramFormats == nil { + paramFormats = make([]int, len(args)) + } + paramFormats[i] = 1 + } + } + if paramFormats == nil { + b.int16(0) + } else { + b.int16(len(paramFormats)) + for _, x := range paramFormats { + b.int16(x) + } + } + + b.int16(len(args)) + for _, x := range args { + if x == nil { + b.int32(-1) + } else { + datum := binaryEncode(&cn.parameterStatus, x) + b.int32(len(datum)) + b.bytes(datum) + } + } +} + +func (cn *conn) sendBinaryModeQuery(query string, args []driver.Value) { + if len(args) >= 65536 { + errorf("got %d parameters but PostgreSQL only supports 65535 parameters", len(args)) + } + + b := cn.writeBuf('P') + b.byte(0) // unnamed statement + b.string(query) + b.int16(0) + + b.next('B') + b.int16(0) // unnamed portal and statement + cn.sendBinaryParameters(b, args) + b.bytes(colFmtDataAllText) + + b.next('D') + b.byte('P') + b.byte(0) // unnamed portal + + b.next('E') + b.byte(0) + b.int32(0) + + b.next('S') + cn.send(b) +} + +func (cn *conn) processParameterStatus(r *readBuf) { + var err error + + param := r.string() + switch param { + case "server_version": + var major1 int + var major2 int + _, err = fmt.Sscanf(r.string(), "%d.%d", &major1, &major2) + if err == nil { + cn.parameterStatus.serverVersion = major1*10000 + major2*100 + } + + case "TimeZone": + cn.parameterStatus.currentLocation, err = time.LoadLocation(r.string()) + if err != nil { + cn.parameterStatus.currentLocation = nil + } + + default: + // ignore + } +} + +func (cn *conn) processReadyForQuery(r *readBuf) { + cn.txnStatus = transactionStatus(r.byte()) +} + +func (cn *conn) readReadyForQuery() { + t, r := cn.recv1() + switch t { + case 'Z': + cn.processReadyForQuery(r) + return + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected message %q; expected ReadyForQuery", t) + } +} + +func (cn *conn) processBackendKeyData(r *readBuf) { + cn.processID = r.int32() + cn.secretKey = r.int32() +} + +func (cn *conn) readParseResponse() { + t, r := cn.recv1() + switch t { + case '1': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Parse response %q", t) + } +} + +func (cn *conn) readStatementDescribeResponse() (paramTyps []oid.Oid, colNames []string, colTyps []fieldDesc) { + for { + t, r := cn.recv1() + switch t { + case 't': + nparams := r.int16() + paramTyps = make([]oid.Oid, nparams) + for i := range paramTyps { + paramTyps[i] = r.oid() + } + case 'n': + return paramTyps, nil, nil + case 'T': + colNames, colTyps = parseStatementRowDescribe(r) + return paramTyps, 
colNames, colTyps + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Describe statement response %q", t) + } + } +} + +func (cn *conn) readPortalDescribeResponse() rowsHeader { + t, r := cn.recv1() + switch t { + case 'T': + return parsePortalRowDescribe(r) + case 'n': + return rowsHeader{} + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Describe response %q", t) + } + panic("not reached") +} + +func (cn *conn) readBindResponse() { + t, r := cn.recv1() + switch t { + case '2': + return + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected Bind response %q", t) + } +} + +func (cn *conn) postExecuteWorkaround() { + // Work around a bug in sql.DB.QueryRow: in Go 1.2 and earlier it ignores + // any errors from rows.Next, which masks errors that happened during the + // execution of the query. To avoid the problem in common cases, we wait + // here for one more message from the database. If it's not an error the + // query will likely succeed (or perhaps has already, if it's a + // CommandComplete), so we push the message into the conn struct; recv1 + // will return it as the next message for rows.Next or rows.Close. + // However, if it's an error, we wait until ReadyForQuery and then return + // the error to our caller. + for { + t, r := cn.recv1() + switch t { + case 'E': + err := parseError(r) + cn.readReadyForQuery() + panic(err) + case 'C', 'D', 'I': + // the query didn't fail, but we can't process this message + cn.saveMessage(t, r) + return + default: + cn.err.set(driver.ErrBadConn) + errorf("unexpected message during extended query execution: %q", t) + } + } +} + +// Only for Exec(), since we ignore the returned data +func (cn *conn) readExecuteResponse(protocolState string) (res driver.Result, commandTag string, err error) { + for { + t, r := cn.recv1() + switch t { + case 'C': + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected CommandComplete after error %s", err) + } + res, commandTag = cn.parseComplete(r.string()) + case 'Z': + cn.processReadyForQuery(r) + if res == nil && err == nil { + err = errUnexpectedReady + } + return res, commandTag, err + case 'E': + err = parseError(r) + case 'T', 'D', 'I': + if err != nil { + cn.err.set(driver.ErrBadConn) + errorf("unexpected %q after error %s", t, err) + } + if t == 'I' { + res = emptyRows + } + // ignore any results + default: + cn.err.set(driver.ErrBadConn) + errorf("unknown %s response: %q", protocolState, t) + } + } +} + +func parseStatementRowDescribe(r *readBuf) (colNames []string, colTyps []fieldDesc) { + n := r.int16() + colNames = make([]string, n) + colTyps = make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + // format code not known when describing a statement; always 0 + r.next(2) + } + return +} + +func parsePortalRowDescribe(r *readBuf) rowsHeader { + n := r.int16() + colNames := make([]string, n) + colFmts := make([]format, n) + colTyps := make([]fieldDesc, n) + for i := range colNames { + colNames[i] = r.string() + r.next(6) + colTyps[i].OID = r.oid() + colTyps[i].Len = r.int16() + colTyps[i].Mod = r.int32() + colFmts[i] = format(r.int16()) + } + return rowsHeader{ + colNames: colNames, + colFmts: colFmts, + colTyps: colTyps, + 
} +} + +// parseEnviron tries to mimic some of libpq's environment handling +// +// To ease testing, it does not directly reference os.Environ, but is +// designed to accept its output. +// +// Environment-set connection information is intended to have a higher +// precedence than a library default but lower than any explicitly +// passed information (such as in the URL or connection string). +func parseEnviron(env []string) (out map[string]string) { + out = make(map[string]string) + + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + + accrue := func(keyname string) { + out[keyname] = parts[1] + } + unsupported := func() { + panic(fmt.Sprintf("setting %v not supported", parts[0])) + } + + // The order of these is the same as is seen in the + // PostgreSQL 9.1 manual. Unsupported but well-defined + // keys cause a panic; these should be unset prior to + // execution. Options which pq expects to be set to a + // certain value are allowed, but must be set to that + // value if present (they can, of course, be absent). + switch parts[0] { + case "PGHOST": + accrue("host") + case "PGHOSTADDR": + unsupported() + case "PGPORT": + accrue("port") + case "PGDATABASE": + accrue("dbname") + case "PGUSER": + accrue("user") + case "PGPASSWORD": + accrue("password") + case "PGSERVICE", "PGSERVICEFILE", "PGREALM": + unsupported() + case "PGOPTIONS": + accrue("options") + case "PGAPPNAME": + accrue("application_name") + case "PGSSLMODE": + accrue("sslmode") + case "PGSSLCERT": + accrue("sslcert") + case "PGSSLKEY": + accrue("sslkey") + case "PGSSLROOTCERT": + accrue("sslrootcert") + case "PGREQUIRESSL", "PGSSLCRL": + unsupported() + case "PGREQUIREPEER": + unsupported() + case "PGKRBSRVNAME", "PGGSSLIB": + unsupported() + case "PGCONNECT_TIMEOUT": + accrue("connect_timeout") + case "PGCLIENTENCODING": + accrue("client_encoding") + case "PGDATESTYLE": + accrue("datestyle") + case "PGTZ": + accrue("timezone") + case "PGGEQO": + accrue("geqo") + case "PGSYSCONFDIR", "PGLOCALEDIR": + unsupported() + } + } + + return out +} + +// isUTF8 returns whether name is a fuzzy variation of the string "UTF-8". 
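+// For example, "UTF8", "utf-8", "UTF_8" and "Unicode" are all recognized,
+// while a name like "latin1" is not: case and punctuation are ignored in
+// the comparison.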
+func isUTF8(name string) bool { + // Recognize all sorts of silly things as "UTF-8", like Postgres does + s := strings.Map(alnumLowerASCII, name) + return s == "utf8" || s == "unicode" +} + +func alnumLowerASCII(ch rune) rune { + if 'A' <= ch && ch <= 'Z' { + return ch + ('a' - 'A') + } + if 'a' <= ch && ch <= 'z' || '0' <= ch && ch <= '9' { + return ch + } + return -1 // discard +} diff --git a/vendor/github.com/lib/pq/conn_go18.go b/vendor/github.com/lib/pq/conn_go18.go new file mode 100644 index 0000000000..63d4ca6aaa --- /dev/null +++ b/vendor/github.com/lib/pq/conn_go18.go @@ -0,0 +1,247 @@ +package pq + +import ( + "context" + "database/sql" + "database/sql/driver" + "fmt" + "io" + "io/ioutil" + "time" +) + +const ( + watchCancelDialContextTimeout = time.Second * 10 +) + +// Implement the "QueryerContext" interface +func (cn *conn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := cn.watchCancel(ctx) + r, err := cn.query(query, list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "ExecerContext" interface +func (cn *conn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + + return cn.Exec(query, list) +} + +// Implement the "ConnPrepareContext" interface +func (cn *conn) PrepareContext(ctx context.Context, query string) (driver.Stmt, error) { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + return cn.Prepare(query) +} + +// Implement the "ConnBeginTx" interface +func (cn *conn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + var mode string + + switch sql.IsolationLevel(opts.Isolation) { + case sql.LevelDefault: + // Don't touch mode: use the server's default + case sql.LevelReadUncommitted: + mode = " ISOLATION LEVEL READ UNCOMMITTED" + case sql.LevelReadCommitted: + mode = " ISOLATION LEVEL READ COMMITTED" + case sql.LevelRepeatableRead: + mode = " ISOLATION LEVEL REPEATABLE READ" + case sql.LevelSerializable: + mode = " ISOLATION LEVEL SERIALIZABLE" + default: + return nil, fmt.Errorf("pq: isolation level not supported: %d", opts.Isolation) + } + + if opts.ReadOnly { + mode += " READ ONLY" + } else { + mode += " READ WRITE" + } + + tx, err := cn.begin(mode) + if err != nil { + return nil, err + } + cn.txnFinish = cn.watchCancel(ctx) + return tx, nil +} + +func (cn *conn) Ping(ctx context.Context) error { + if finish := cn.watchCancel(ctx); finish != nil { + defer finish() + } + rows, err := cn.simpleQuery(";") + if err != nil { + return driver.ErrBadConn // https://golang.org/pkg/database/sql/driver/#Pinger + } + rows.Close() + return nil +} + +func (cn *conn) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}, 1) + go func() { + select { + case <-done: + select { + case finished <- struct{}{}: + default: + // We raced with the finish func, let the next query handle this with the + // context. + return + } + + // Set the connection state to bad so it does not get reused. 
+ cn.err.set(ctx.Err()) + + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) + defer cancel() + + _ = cn.cancel(ctxCancel) + case <-finished: + } + }() + return func() { + select { + case <-finished: + cn.err.set(ctx.Err()) + cn.Close() + case finished <- struct{}{}: + } + } + } + return nil +} + +func (cn *conn) cancel(ctx context.Context) error { + // Create a new values map (copy). This makes sure the connection created + // in this method cannot write to the same underlying data, which could + // cause a concurrent map write panic. This is necessary because cancel + // is called from a goroutine in watchCancel. + o := make(values) + for k, v := range cn.opts { + o[k] = v + } + + c, err := dial(ctx, cn.dialer, o) + if err != nil { + return err + } + defer c.Close() + + { + can := conn{ + c: c, + } + err = can.ssl(o) + if err != nil { + return err + } + + w := can.writeBuf(0) + w.int32(80877102) // cancel request code + w.int32(cn.processID) + w.int32(cn.secretKey) + + if err := can.sendStartupPacket(w); err != nil { + return err + } + } + + // Read until EOF to ensure that the server received the cancel. + { + _, err := io.Copy(ioutil.Discard, c) + return err + } +} + +// Implement the "StmtQueryContext" interface +func (st *stmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + finish := st.watchCancel(ctx) + r, err := st.query(list) + if err != nil { + if finish != nil { + finish() + } + return nil, err + } + r.finish = finish + return r, nil +} + +// Implement the "StmtExecContext" interface +func (st *stmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + list := make([]driver.Value, len(args)) + for i, nv := range args { + list[i] = nv.Value + } + + if finish := st.watchCancel(ctx); finish != nil { + defer finish() + } + + return st.Exec(list) +} + +// watchCancel is implemented on stmt in order to not mark the parent conn as bad +func (st *stmt) watchCancel(ctx context.Context) func() { + if done := ctx.Done(); done != nil { + finished := make(chan struct{}) + go func() { + select { + case <-done: + // At this point the function level context is canceled, + // so it must not be used for the additional network + // request to cancel the query. + // Create a new context to pass into the dial. + ctxCancel, cancel := context.WithTimeout(context.Background(), watchCancelDialContextTimeout) + defer cancel() + + _ = st.cancel(ctxCancel) + finished <- struct{}{} + case <-finished: + } + }() + return func() { + select { + case <-finished: + case finished <- struct{}{}: + } + } + } + return nil +} + +func (st *stmt) cancel(ctx context.Context) error { + return st.cn.cancel(ctx) +} diff --git a/vendor/github.com/lib/pq/connector.go b/vendor/github.com/lib/pq/connector.go new file mode 100644 index 0000000000..d7d4726156 --- /dev/null +++ b/vendor/github.com/lib/pq/connector.go @@ -0,0 +1,115 @@ +package pq + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "os" + "strings" +) + +// Connector represents a fixed configuration for the pq driver with a given +// name. 
Connector satisfies the database/sql/driver Connector interface and +// can be used to create any number of DB Conn's via the database/sql OpenDB +// function. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +type Connector struct { + opts values + dialer Dialer +} + +// Connect returns a connection to the database using the fixed configuration +// of this Connector. Context is not used. +func (c *Connector) Connect(ctx context.Context) (driver.Conn, error) { + return c.open(ctx) +} + +// Driver returns the underlying driver of this Connector. +func (c *Connector) Driver() driver.Driver { + return &Driver{} +} + +// NewConnector returns a connector for the pq driver in a fixed configuration +// with the given dsn. The returned connector can be used to create any number +// of equivalent Conn's. The returned connector is intended to be used with +// database/sql.OpenDB. +// +// See https://golang.org/pkg/database/sql/driver/#Connector. +// See https://golang.org/pkg/database/sql/#OpenDB. +func NewConnector(dsn string) (*Connector, error) { + var err error + o := make(values) + + // A number of defaults are applied here, in this order: + // + // * Very low precedence defaults applied in every situation + // * Environment variables + // * Explicitly passed connection information + o["host"] = "localhost" + o["port"] = "5432" + // N.B.: Extra float digits should be set to 3, but that breaks + // Postgres 8.4 and older, where the max is 2. + o["extra_float_digits"] = "2" + for k, v := range parseEnviron(os.Environ()) { + o[k] = v + } + + if strings.HasPrefix(dsn, "postgres://") || strings.HasPrefix(dsn, "postgresql://") { + dsn, err = ParseURL(dsn) + if err != nil { + return nil, err + } + } + + if err := parseOpts(dsn, o); err != nil { + return nil, err + } + + // Use the "fallback" application name if necessary + if fallback, ok := o["fallback_application_name"]; ok { + if _, ok := o["application_name"]; !ok { + o["application_name"] = fallback + } + } + + // We can't work with any client_encoding other than UTF-8 currently. + // However, we have historically allowed the user to set it to UTF-8 + // explicitly, and there's no reason to break such programs, so allow that. + // Note that the "options" setting could also set client_encoding, but + // parsing its value is not worth it. Instead, we always explicitly send + // client_encoding as a separate run-time parameter, which should override + // anything set in options. + if enc, ok := o["client_encoding"]; ok && !isUTF8(enc) { + return nil, errors.New("client_encoding must be absent or 'UTF8'") + } + o["client_encoding"] = "UTF8" + // DateStyle needs a similar treatment. + if datestyle, ok := o["datestyle"]; ok { + if datestyle != "ISO, MDY" { + return nil, fmt.Errorf("setting datestyle must be absent or %v; got %v", "ISO, MDY", datestyle) + } + } else { + o["datestyle"] = "ISO, MDY" + } + + // If a user is not provided by any other means, the last + // resort is to use the current operating system provided user + // name. 
+ if _, ok := o["user"]; !ok { + u, err := userCurrent() + if err != nil { + return nil, err + } + o["user"] = u + } + + // SSL is not necessary or supported over UNIX domain sockets + if network, _ := network(o); network == "unix" { + o["sslmode"] = "disable" + } + + return &Connector{opts: o, dialer: defaultDialer{}}, nil +} diff --git a/vendor/github.com/lib/pq/copy.go b/vendor/github.com/lib/pq/copy.go new file mode 100644 index 0000000000..c072bc3bc4 --- /dev/null +++ b/vendor/github.com/lib/pq/copy.go @@ -0,0 +1,303 @@ +package pq + +import ( + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "sync" +) + +var ( + errCopyInClosed = errors.New("pq: copyin statement has already been closed") + errBinaryCopyNotSupported = errors.New("pq: only text format supported for COPY") + errCopyToNotSupported = errors.New("pq: COPY TO is not supported") + errCopyNotSupportedOutsideTxn = errors.New("pq: COPY is only allowed inside a transaction") + errCopyInProgress = errors.New("pq: COPY in progress") +) + +// CopyIn creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). The target table should be visible in search_path. +func CopyIn(table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +// CopyInSchema creates a COPY FROM statement which can be prepared with +// Tx.Prepare(). +func CopyInSchema(schema, table string, columns ...string) string { + stmt := "COPY " + QuoteIdentifier(schema) + "." + QuoteIdentifier(table) + " (" + for i, col := range columns { + if i != 0 { + stmt += ", " + } + stmt += QuoteIdentifier(col) + } + stmt += ") FROM STDIN" + return stmt +} + +type copyin struct { + cn *conn + buffer []byte + rowData chan []byte + done chan bool + + closed bool + + mu struct { + sync.Mutex + err error + driver.Result + } +} + +const ciBufferSize = 64 * 1024 + +// flush buffer before the buffer is filled up and needs reallocation +const ciBufferFlushSize = 63 * 1024 + +func (cn *conn) prepareCopyIn(q string) (_ driver.Stmt, err error) { + if !cn.isInTransaction() { + return nil, errCopyNotSupportedOutsideTxn + } + + ci := ©in{ + cn: cn, + buffer: make([]byte, 0, ciBufferSize), + rowData: make(chan []byte), + done: make(chan bool, 1), + } + // add CopyData identifier + 4 bytes for message length + ci.buffer = append(ci.buffer, 'd', 0, 0, 0, 0) + + b := cn.writeBuf('Q') + b.string(q) + cn.send(b) + +awaitCopyInResponse: + for { + t, r := cn.recv1() + switch t { + case 'G': + if r.byte() != 0 { + err = errBinaryCopyNotSupported + break awaitCopyInResponse + } + go ci.resploop() + return ci, nil + case 'H': + err = errCopyToNotSupported + break awaitCopyInResponse + case 'E': + err = parseError(r) + case 'Z': + if err == nil { + ci.setBad(driver.ErrBadConn) + errorf("unexpected ReadyForQuery in response to COPY") + } + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad(driver.ErrBadConn) + errorf("unknown response for copy query: %q", t) + } + } + + // something went wrong, abort COPY before we return + b = cn.writeBuf('f') + b.string(err.Error()) + cn.send(b) + + for { + t, r := cn.recv1() + switch t { + case 'c', 'C', 'E': + case 'Z': + // correctly aborted, we're done + cn.processReadyForQuery(r) + return nil, err + default: + ci.setBad(driver.ErrBadConn) + errorf("unknown response for CopyFail: %q", t) + } + } +} + +func (ci *copyin) 
flush(buf []byte) { + // set message length (without message identifier) + binary.BigEndian.PutUint32(buf[1:], uint32(len(buf)-1)) + + _, err := ci.cn.c.Write(buf) + if err != nil { + panic(err) + } +} + +func (ci *copyin) resploop() { + for { + var r readBuf + t, err := ci.cn.recvMessage(&r) + if err != nil { + ci.setBad(driver.ErrBadConn) + ci.setError(err) + ci.done <- true + return + } + switch t { + case 'C': + // complete + res, _ := ci.cn.parseComplete(r.string()) + ci.setResult(res) + case 'N': + if n := ci.cn.noticeHandler; n != nil { + n(parseError(&r)) + } + case 'Z': + ci.cn.processReadyForQuery(&r) + ci.done <- true + return + case 'E': + err := parseError(&r) + ci.setError(err) + default: + ci.setBad(driver.ErrBadConn) + ci.setError(fmt.Errorf("unknown response during CopyIn: %q", t)) + ci.done <- true + return + } + } +} + +func (ci *copyin) setBad(err error) { + ci.cn.err.set(err) +} + +func (ci *copyin) getBad() error { + return ci.cn.err.get() +} + +func (ci *copyin) err() error { + ci.mu.Lock() + err := ci.mu.err + ci.mu.Unlock() + return err +} + +// setError() sets ci.err if one has not been set already. Caller must not be +// holding ci.Mutex. +func (ci *copyin) setError(err error) { + ci.mu.Lock() + if ci.mu.err == nil { + ci.mu.err = err + } + ci.mu.Unlock() +} + +func (ci *copyin) setResult(result driver.Result) { + ci.mu.Lock() + ci.mu.Result = result + ci.mu.Unlock() +} + +func (ci *copyin) getResult() driver.Result { + ci.mu.Lock() + result := ci.mu.Result + ci.mu.Unlock() + if result == nil { + return driver.RowsAffected(0) + } + return result +} + +func (ci *copyin) NumInput() int { + return -1 +} + +func (ci *copyin) Query(v []driver.Value) (r driver.Rows, err error) { + return nil, ErrNotSupported +} + +// Exec inserts values into the COPY stream. The insert is asynchronous +// and Exec can return errors from previous Exec calls to the same +// COPY stmt. +// +// You need to call Exec(nil) to sync the COPY stream and to get any +// errors from pending data, since Stmt.Close() doesn't return errors +// to the user. +func (ci *copyin) Exec(v []driver.Value) (r driver.Result, err error) { + if ci.closed { + return nil, errCopyInClosed + } + + if err := ci.getBad(); err != nil { + return nil, err + } + defer ci.cn.errRecover(&err) + + if err := ci.err(); err != nil { + return nil, err + } + + if len(v) == 0 { + if err := ci.Close(); err != nil { + return driver.RowsAffected(0), err + } + + return ci.getResult(), nil + } + + numValues := len(v) + for i, value := range v { + ci.buffer = appendEncodedText(&ci.cn.parameterStatus, ci.buffer, value) + if i < numValues-1 { + ci.buffer = append(ci.buffer, '\t') + } + } + + ci.buffer = append(ci.buffer, '\n') + + if len(ci.buffer) > ciBufferFlushSize { + ci.flush(ci.buffer) + // reset buffer, keep bytes for message identifier and length + ci.buffer = ci.buffer[:5] + } + + return driver.RowsAffected(0), nil +} + +func (ci *copyin) Close() (err error) { + if ci.closed { // Don't do anything, we're already closed + return nil + } + ci.closed = true + + if err := ci.getBad(); err != nil { + return err + } + defer ci.cn.errRecover(&err) + + if len(ci.buffer) > 0 { + ci.flush(ci.buffer) + } + // Avoid touching the scratch buffer as resploop could be using it. 
+ err = ci.cn.sendSimpleMessage('c') + if err != nil { + return err + } + + <-ci.done + ci.cn.inCopy = false + + if err := ci.err(); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/lib/pq/doc.go b/vendor/github.com/lib/pq/doc.go new file mode 100644 index 0000000000..b57184801b --- /dev/null +++ b/vendor/github.com/lib/pq/doc.go @@ -0,0 +1,268 @@ +/* +Package pq is a pure Go Postgres driver for the database/sql package. + +In most cases clients will use the database/sql package instead of +using this package directly. For example: + + import ( + "database/sql" + + _ "github.com/lib/pq" + ) + + func main() { + connStr := "user=pqgotest dbname=pqgotest sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + if err != nil { + log.Fatal(err) + } + + age := 21 + rows, err := db.Query("SELECT name FROM users WHERE age = $1", age) + … + } + +You can also connect to a database using a URL. For example: + + connStr := "postgres://pqgotest:password@localhost/pqgotest?sslmode=verify-full" + db, err := sql.Open("postgres", connStr) + + +Connection String Parameters + + +Similarly to libpq, when establishing a connection using pq you are expected to +supply a connection string containing zero or more parameters. +A subset of the connection parameters supported by libpq are also supported by pq. +Additionally, pq also lets you specify run-time parameters (such as search_path or work_mem) +directly in the connection string. This is different from libpq, which does not allow +run-time parameters in the connection string, instead requiring you to supply +them in the options parameter. + +For compatibility with libpq, the following special connection parameters are +supported: + + * dbname - The name of the database to connect to + * user - The user to sign in as + * password - The user's password + * host - The host to connect to. Values that start with / are for unix + domain sockets. (default is localhost) + * port - The port to connect to. (default is 5432) + * sslmode - Whether or not to use SSL (default is require, this is not + the default for libpq) + * fallback_application_name - An application_name to fall back to if one isn't provided. + * connect_timeout - Maximum wait for connection, in seconds. Zero or + not specified means wait indefinitely. + * sslcert - Cert file location. The file must contain PEM encoded data. + * sslkey - Key file location. The file must contain PEM encoded data. + * sslrootcert - The location of the root certificate file. The file + must contain PEM encoded data. + +Valid values for sslmode are: + + * disable - No SSL + * require - Always SSL (skip verification) + * verify-ca - Always SSL (verify that the certificate presented by the + server was signed by a trusted CA) + * verify-full - Always SSL (verify that the certificate presented by + the server was signed by a trusted CA and the server host name + matches the one in the certificate) + +See http://www.postgresql.org/docs/current/static/libpq-connect.html#LIBPQ-CONNSTRING +for more information about connection string parameters. + +Use single quotes for values that contain whitespace: + + "user=pqgotest password='with spaces'" + +A backslash will escape the next character in values: + + "user=space\ man password='it\'s valid'" + +Note that the connection parameter client_encoding (which sets the +text encoding for the connection) may be set but must be "UTF8", +matched using the same rules as Postgres. It is an error to provide +any other value.
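+
+For illustration, several of the above parameters combined in one connection
+string might look like this (the host and certificate path are examples, not
+defaults):
+
+	connStr := "host=db.example.com port=5432 user=pqgotest dbname=pqgotest sslmode=verify-full sslrootcert=/etc/ssl/certs/root.crt connect_timeout=10"
+	db, err := sql.Open("postgres", connStr)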
+ +In addition to the parameters listed above, any run-time parameter that can be +set at backend start time can be set in the connection string. For more +information, see +http://www.postgresql.org/docs/current/static/runtime-config.html. + +Most environment variables as specified at http://www.postgresql.org/docs/current/static/libpq-envars.html +supported by libpq are also supported by pq. If any of the environment +variables not supported by pq are set, pq will panic during connection +establishment. Environment variables have a lower precedence than explicitly +provided connection parameters. + +The pgpass mechanism as described in http://www.postgresql.org/docs/current/static/libpq-pgpass.html +is supported, but on Windows PGPASSFILE must be specified explicitly. + + +Queries + + +database/sql does not dictate any specific format for parameter +markers in query strings, and pq uses the Postgres-native ordinal markers, +as shown above. The same marker can be reused for the same parameter: + + rows, err := db.Query(`SELECT name FROM users WHERE favorite_fruit = $1 + OR age BETWEEN $2 AND $2 + 3`, "orange", 64) + +pq does not support the LastInsertId() method of the Result type in database/sql. +To return the identifier of an INSERT (or UPDATE or DELETE), use the Postgres +RETURNING clause with a standard Query or QueryRow call: + + var userid int + err := db.QueryRow(`INSERT INTO users(name, favorite_fruit, age) + VALUES('beatrice', 'starfruit', 93) RETURNING id`).Scan(&userid) + +For more details on RETURNING, see the Postgres documentation: + + http://www.postgresql.org/docs/current/static/sql-insert.html + http://www.postgresql.org/docs/current/static/sql-update.html + http://www.postgresql.org/docs/current/static/sql-delete.html + +For additional instructions on querying see the documentation for the database/sql package. + + +Data Types + + +Parameters pass through driver.DefaultParameterConverter before they are handled +by this package. When the binary_parameters connection option is enabled, +[]byte values are sent directly to the backend as data in binary format. + +This package returns the following types for values from the PostgreSQL backend: + + - integer types smallint, integer, and bigint are returned as int64 + - floating-point types real and double precision are returned as float64 + - character types char, varchar, and text are returned as string + - temporal types date, time, timetz, timestamp, and timestamptz are + returned as time.Time + - the boolean type is returned as bool + - the bytea type is returned as []byte + +All other types are returned directly from the backend as []byte values in text format. + + +Errors + + +pq may return errors of type *pq.Error which can be interrogated for error details: + + if err, ok := err.(*pq.Error); ok { + fmt.Println("pq error:", err.Code.Name()) + } + +See the pq.Error type for details. + + +Bulk imports + +You can perform bulk imports by preparing a statement returned by pq.CopyIn (or +pq.CopyInSchema) in an explicit transaction (sql.Tx). The returned statement +handle can then be repeatedly "executed" to copy data into the target table. +After all data has been processed you should call Exec() once with no arguments +to flush all buffered data. Any call to Exec() might return an error which +should be handled appropriately, but because of the internal buffering an error +returned by Exec() might not be related to the data passed in the call that +failed. + +CopyIn uses COPY FROM internally. 
It is not possible to COPY outside of an +explicit transaction in pq. + +Usage example: + + txn, err := db.Begin() + if err != nil { + log.Fatal(err) + } + + stmt, err := txn.Prepare(pq.CopyIn("users", "name", "age")) + if err != nil { + log.Fatal(err) + } + + for _, user := range users { + _, err = stmt.Exec(user.Name, int64(user.Age)) + if err != nil { + log.Fatal(err) + } + } + + _, err = stmt.Exec() + if err != nil { + log.Fatal(err) + } + + err = stmt.Close() + if err != nil { + log.Fatal(err) + } + + err = txn.Commit() + if err != nil { + log.Fatal(err) + } + + +Notifications + + +PostgreSQL supports a simple publish/subscribe model over database +connections. See http://www.postgresql.org/docs/current/static/sql-notify.html +for more information about the general mechanism. + +To start listening for notifications, you first have to open a new connection +to the database by calling NewListener. This connection can not be used for +anything other than LISTEN / NOTIFY. Calling Listen will open a "notification +channel"; once a notification channel is open, a notification generated on that +channel will effect a send on the Listener.Notify channel. A notification +channel will remain open until Unlisten is called, though connection loss might +result in some notifications being lost. To solve this problem, Listener sends +a nil pointer over the Notify channel any time the connection is re-established +following a connection loss. The application can get information about the +state of the underlying connection by setting an event callback in the call to +NewListener. + +A single Listener can safely be used from concurrent goroutines, which means +that there is often no need to create more than one Listener in your +application. However, a Listener is always connected to a single database, so +you will need to create a new Listener instance for every database you want to +receive notifications in. + +The channel name in both Listen and Unlisten is case sensitive, and can contain +any characters legal in an identifier (see +http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html#SQL-SYNTAX-IDENTIFIERS +for more information). Note that the channel name will be truncated to 63 +bytes by the PostgreSQL server. + +You can find a complete, working example of Listener usage at +https://godoc.org/github.com/lib/pq/example/listen. + + +Kerberos Support + + +If you need support for Kerberos authentication, add the following to your main +package: + + import "github.com/lib/pq/auth/kerberos" + + func init() { + pq.RegisterGSSProvider(func() (pq.Gss, error) { return kerberos.NewGSS() }) + } + +This package is in a separate module so that users who don't need Kerberos +don't have to download unnecessary dependencies. + +When imported, additional connection string parameters are supported: + + * krbsrvname - GSS (Kerberos) service name when constructing the + SPN (default is `postgres`). This will be combined with the host + to form the full SPN: `krbsrvname/host`. + * krbspn - GSS (Kerberos) SPN. This takes priority over + `krbsrvname` if present. 
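+
+For example, a connection string that uses Kerberos via this module might
+look like the following (the host and service name are illustrative):
+
+	connStr := "host=db.example.com user=pqgotest krbsrvname=postgres sslmode=require"
+	db, err := sql.Open("postgres", connStr)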
+*/ +package pq diff --git a/vendor/github.com/lib/pq/encode.go b/vendor/github.com/lib/pq/encode.go new file mode 100644 index 0000000000..210b1ec347 --- /dev/null +++ b/vendor/github.com/lib/pq/encode.go @@ -0,0 +1,628 @@ +package pq + +import ( + "bytes" + "database/sql/driver" + "encoding/binary" + "encoding/hex" + "errors" + "fmt" + "math" + "regexp" + "strconv" + "strings" + "sync" + "time" + + "github.com/lib/pq/oid" +) + +var time2400Regex = regexp.MustCompile(`^(24:00(?::00(?:\.0+)?)?)(?:[Z+-].*)?$`) + +func binaryEncode(parameterStatus *parameterStatus, x interface{}) []byte { + switch v := x.(type) { + case []byte: + return v + default: + return encode(parameterStatus, x, oid.T_unknown) + } +} + +func encode(parameterStatus *parameterStatus, x interface{}, pgtypOid oid.Oid) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(nil, v, 10) + case float64: + return strconv.AppendFloat(nil, v, 'f', -1, 64) + case []byte: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, v) + } + + return v + case string: + if pgtypOid == oid.T_bytea { + return encodeBytea(parameterStatus.serverVersion, []byte(v)) + } + + return []byte(v) + case bool: + return strconv.AppendBool(nil, v) + case time.Time: + return formatTs(v) + + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func decode(parameterStatus *parameterStatus, s []byte, typ oid.Oid, f format) interface{} { + switch f { + case formatBinary: + return binaryDecode(parameterStatus, s, typ) + case formatText: + return textDecode(parameterStatus, s, typ) + default: + panic("not reached") + } +} + +func binaryDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_bytea: + return s + case oid.T_int8: + return int64(binary.BigEndian.Uint64(s)) + case oid.T_int4: + return int64(int32(binary.BigEndian.Uint32(s))) + case oid.T_int2: + return int64(int16(binary.BigEndian.Uint16(s))) + case oid.T_uuid: + b, err := decodeUUIDBinary(s) + if err != nil { + panic(err) + } + return b + + default: + errorf("don't know how to decode binary parameter of type %d", uint32(typ)) + } + + panic("not reached") +} + +func textDecode(parameterStatus *parameterStatus, s []byte, typ oid.Oid) interface{} { + switch typ { + case oid.T_char, oid.T_varchar, oid.T_text: + return string(s) + case oid.T_bytea: + b, err := parseBytea(s) + if err != nil { + errorf("%s", err) + } + return b + case oid.T_timestamptz: + return parseTs(parameterStatus.currentLocation, string(s)) + case oid.T_timestamp, oid.T_date: + return parseTs(nil, string(s)) + case oid.T_time: + return mustParse("15:04:05", typ, s) + case oid.T_timetz: + return mustParse("15:04:05-07", typ, s) + case oid.T_bool: + return s[0] == 't' + case oid.T_int8, oid.T_int4, oid.T_int2: + i, err := strconv.ParseInt(string(s), 10, 64) + if err != nil { + errorf("%s", err) + } + return i + case oid.T_float4, oid.T_float8: + // We always use 64 bit parsing, regardless of whether the input text is for + // a float4 or float8, because clients expect float64s for all float datatypes + // and returning a 32-bit parsed float64 produces lossy results. 
+ f, err := strconv.ParseFloat(string(s), 64) + if err != nil { + errorf("%s", err) + } + return f + } + + return s +} + +// appendEncodedText encodes item in text format as required by COPY +// and appends to buf +func appendEncodedText(parameterStatus *parameterStatus, buf []byte, x interface{}) []byte { + switch v := x.(type) { + case int64: + return strconv.AppendInt(buf, v, 10) + case float64: + return strconv.AppendFloat(buf, v, 'f', -1, 64) + case []byte: + encodedBytea := encodeBytea(parameterStatus.serverVersion, v) + return appendEscapedText(buf, string(encodedBytea)) + case string: + return appendEscapedText(buf, v) + case bool: + return strconv.AppendBool(buf, v) + case time.Time: + return append(buf, formatTs(v)...) + case nil: + return append(buf, "\\N"...) + default: + errorf("encode: unknown type for %T", v) + } + + panic("not reached") +} + +func appendEscapedText(buf []byte, text string) []byte { + escapeNeeded := false + startPos := 0 + var c byte + + // check if we need to escape + for i := 0; i < len(text); i++ { + c = text[i] + if c == '\\' || c == '\n' || c == '\r' || c == '\t' { + escapeNeeded = true + startPos = i + break + } + } + if !escapeNeeded { + return append(buf, text...) + } + + // copy till first char to escape, iterate the rest + result := append(buf, text[:startPos]...) + for i := startPos; i < len(text); i++ { + c = text[i] + switch c { + case '\\': + result = append(result, '\\', '\\') + case '\n': + result = append(result, '\\', 'n') + case '\r': + result = append(result, '\\', 'r') + case '\t': + result = append(result, '\\', 't') + default: + result = append(result, c) + } + } + return result +} + +func mustParse(f string, typ oid.Oid, s []byte) time.Time { + str := string(s) + + // Check for a minute and second offset in the timezone. + if typ == oid.T_timestamptz || typ == oid.T_timetz { + for i := 3; i <= 6; i += 3 { + if str[len(str)-i] == ':' { + f += ":00" + continue + } + break + } + } + + // Special case for 24:00 time. + // Unfortunately, golang does not parse 24:00 as a proper time. + // In this case, we want to try "round to the next day", to differentiate. + // As such, we find if the 24:00 time matches at the beginning; if so, + // we default it back to 00:00 but add a day later. + var is2400Time bool + switch typ { + case oid.T_timetz, oid.T_time: + if matches := time2400Regex.FindStringSubmatch(str); matches != nil { + // Concatenate timezone information at the back. 
+ str = "00:00:00" + str[len(matches[1]):] + is2400Time = true + } + } + t, err := time.Parse(f, str) + if err != nil { + errorf("decode: %s", err) + } + if is2400Time { + t = t.Add(24 * time.Hour) + } + return t +} + +var errInvalidTimestamp = errors.New("invalid timestamp") + +type timestampParser struct { + err error +} + +func (p *timestampParser) expect(str string, char byte, pos int) { + if p.err != nil { + return + } + if pos+1 > len(str) { + p.err = errInvalidTimestamp + return + } + if c := str[pos]; c != char && p.err == nil { + p.err = fmt.Errorf("expected '%v' at position %v; got '%v'", char, pos, c) + } +} + +func (p *timestampParser) mustAtoi(str string, begin int, end int) int { + if p.err != nil { + return 0 + } + if begin < 0 || end < 0 || begin > end || end > len(str) { + p.err = errInvalidTimestamp + return 0 + } + result, err := strconv.Atoi(str[begin:end]) + if err != nil { + if p.err == nil { + p.err = fmt.Errorf("expected number; got '%v'", str) + } + return 0 + } + return result +} + +// The location cache caches the time zones typically used by the client. +type locationCache struct { + cache map[int]*time.Location + lock sync.Mutex +} + +// All connections share the same list of timezones. Benchmarking shows that +// about 5% speed could be gained by putting the cache in the connection and +// losing the mutex, at the cost of a small amount of memory and a somewhat +// significant increase in code complexity. +var globalLocationCache = newLocationCache() + +func newLocationCache() *locationCache { + return &locationCache{cache: make(map[int]*time.Location)} +} + +// Returns the cached timezone for the specified offset, creating and caching +// it if necessary. +func (c *locationCache) getLocation(offset int) *time.Location { + c.lock.Lock() + defer c.lock.Unlock() + + location, ok := c.cache[offset] + if !ok { + location = time.FixedZone("", offset) + c.cache[offset] = location + } + + return location +} + +var infinityTsEnabled = false +var infinityTsNegative time.Time +var infinityTsPositive time.Time + +const ( + infinityTsEnabledAlready = "pq: infinity timestamp enabled already" + infinityTsNegativeMustBeSmaller = "pq: infinity timestamp: negative value must be smaller (before) than positive" +) + +// EnableInfinityTs controls the handling of Postgres' "-infinity" and +// "infinity" "timestamp"s. +// +// If EnableInfinityTs is not called, "-infinity" and "infinity" will return +// []byte("-infinity") and []byte("infinity") respectively, and potentially +// cause error "sql: Scan error on column index 0: unsupported driver -> Scan +// pair: []uint8 -> *time.Time", when scanning into a time.Time value. +// +// Once EnableInfinityTs has been called, all connections created using this +// driver will decode Postgres' "-infinity" and "infinity" for "timestamp", +// "timestamp with time zone" and "date" types to the predefined minimum and +// maximum times, respectively. When encoding time.Time values, any time which +// equals or precedes the predefined minimum time will be encoded to +// "-infinity". Any values at or past the maximum time will similarly be +// encoded to "infinity". +// +// If EnableInfinityTs is called with negative >= positive, it will panic. +// Calling EnableInfinityTs after a connection has been established results in +// undefined behavior. If EnableInfinityTs is called more than once, it will +// panic. 
+func EnableInfinityTs(negative time.Time, positive time.Time) { + if infinityTsEnabled { + panic(infinityTsEnabledAlready) + } + if !negative.Before(positive) { + panic(infinityTsNegativeMustBeSmaller) + } + infinityTsEnabled = true + infinityTsNegative = negative + infinityTsPositive = positive +} + +/* + * Testing might want to toggle infinityTsEnabled + */ +func disableInfinityTs() { + infinityTsEnabled = false +} + +// This is a time function specific to the Postgres default DateStyle +// setting ("ISO, MDY"), the only one we currently support. This +// accounts for the discrepancies between the parsing available with +// time.Parse and the Postgres date formatting quirks. +func parseTs(currentLocation *time.Location, str string) interface{} { + switch str { + case "-infinity": + if infinityTsEnabled { + return infinityTsNegative + } + return []byte(str) + case "infinity": + if infinityTsEnabled { + return infinityTsPositive + } + return []byte(str) + } + t, err := ParseTimestamp(currentLocation, str) + if err != nil { + panic(err) + } + return t +} + +// ParseTimestamp parses Postgres' text format. It returns a time.Time in +// currentLocation iff that time's offset agrees with the offset sent from the +// Postgres server. Otherwise, ParseTimestamp returns a time.Time with the +// fixed offset provided by the Postgres server. +func ParseTimestamp(currentLocation *time.Location, str string) (time.Time, error) { + p := timestampParser{} + + monSep := strings.IndexRune(str, '-') + // this is the Gregorian year, not the ISO year + // In the Gregorian system, the year 1 BC is followed by AD 1 + year := p.mustAtoi(str, 0, monSep) + daySep := monSep + 3 + month := p.mustAtoi(str, monSep+1, daySep) + p.expect(str, '-', daySep) + timeSep := daySep + 3 + day := p.mustAtoi(str, daySep+1, timeSep) + + minLen := monSep + len("01-01") + 1 + + isBC := strings.HasSuffix(str, " BC") + if isBC { + minLen += 3 + } + + var hour, minute, second int + if len(str) > minLen { + p.expect(str, ' ', timeSep) + minSep := timeSep + 3 + p.expect(str, ':', minSep) + hour = p.mustAtoi(str, timeSep+1, minSep) + secSep := minSep + 3 + p.expect(str, ':', secSep) + minute = p.mustAtoi(str, minSep+1, secSep) + secEnd := secSep + 3 + second = p.mustAtoi(str, secSep+1, secEnd) + } + remainderIdx := monSep + len("01-01 00:00:00") + 1 + // Three optional (but ordered) sections follow: the + // fractional seconds, the time zone offset, and the BC + // designation. We set them up here and adjust the other + // offsets if the preceding sections exist. + + nanoSec := 0 + tzOff := 0 + + if remainderIdx < len(str) && str[remainderIdx] == '.'
{ + fracStart := remainderIdx + 1 + fracOff := strings.IndexAny(str[fracStart:], "-+ ") + if fracOff < 0 { + fracOff = len(str) - fracStart + } + fracSec := p.mustAtoi(str, fracStart, fracStart+fracOff) + nanoSec = fracSec * (1000000000 / int(math.Pow(10, float64(fracOff)))) + + remainderIdx += fracOff + 1 + } + if tzStart := remainderIdx; tzStart < len(str) && (str[tzStart] == '-' || str[tzStart] == '+') { + // time zone separator is always '-' or '+' (UTC is +00) + var tzSign int + switch c := str[tzStart]; c { + case '-': + tzSign = -1 + case '+': + tzSign = +1 + default: + return time.Time{}, fmt.Errorf("expected '-' or '+' at position %v; got %v", tzStart, c) + } + tzHours := p.mustAtoi(str, tzStart+1, tzStart+3) + remainderIdx += 3 + var tzMin, tzSec int + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzMin = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + if remainderIdx < len(str) && str[remainderIdx] == ':' { + tzSec = p.mustAtoi(str, remainderIdx+1, remainderIdx+3) + remainderIdx += 3 + } + tzOff = tzSign * ((tzHours * 60 * 60) + (tzMin * 60) + tzSec) + } + var isoYear int + + if isBC { + isoYear = 1 - year + remainderIdx += 3 + } else { + isoYear = year + } + if remainderIdx < len(str) { + return time.Time{}, fmt.Errorf("expected end of input, got %v", str[remainderIdx:]) + } + t := time.Date(isoYear, time.Month(month), day, + hour, minute, second, nanoSec, + globalLocationCache.getLocation(tzOff)) + + if currentLocation != nil { + // Set the location of the returned Time based on the session's + // TimeZone value, but only if the local time zone database agrees with + // the remote database on the offset. + lt := t.In(currentLocation) + _, newOff := lt.Zone() + if newOff == tzOff { + t = lt + } + } + + return t, p.err +} + +// formatTs formats t into a format postgres understands. +func formatTs(t time.Time) []byte { + if infinityTsEnabled { + // t <= -infinity : ! (t > -infinity) + if !t.After(infinityTsNegative) { + return []byte("-infinity") + } + // t >= infinity : ! (!t < infinity) + if !t.Before(infinityTsPositive) { + return []byte("infinity") + } + } + return FormatTimestamp(t) +} + +// FormatTimestamp formats t into Postgres' text format for timestamps. +func FormatTimestamp(t time.Time) []byte { + // Need to send dates before 0001 A.D. with " BC" suffix, instead of the + // minus sign preferred by Go. + // Beware, "0000" in ISO is "1 BC", "-0001" is "2 BC" and so on + bc := false + if t.Year() <= 0 { + // flip year sign, and add 1, e.g: "0" will be "1", and "-10" will be "11" + t = t.AddDate((-t.Year())*2+1, 0, 0) + bc = true + } + b := []byte(t.Format("2006-01-02 15:04:05.999999999Z07:00")) + + _, offset := t.Zone() + offset %= 60 + if offset != 0 { + // RFC3339Nano already printed the minus sign + if offset < 0 { + offset = -offset + } + + b = append(b, ':') + if offset < 10 { + b = append(b, '0') + } + b = strconv.AppendInt(b, int64(offset), 10) + } + + if bc { + b = append(b, " BC"...) + } + return b +} + +// Parse a bytea value received from the server. Both "hex" and the legacy +// "escape" format are supported. 
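+// For example (inputs shown as the raw text received from the server):
+//
+//	hex form:    \x68656c6c6f decodes to []byte("hello")
+//	escape form: h\\i\000     decodes to []byte{'h', '\\', 'i', 0}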
+func parseBytea(s []byte) (result []byte, err error) { + if len(s) >= 2 && bytes.Equal(s[:2], []byte("\\x")) { + // bytea_output = hex + s = s[2:] // trim off leading "\\x" + result = make([]byte, hex.DecodedLen(len(s))) + _, err := hex.Decode(result, s) + if err != nil { + return nil, err + } + } else { + // bytea_output = escape + for len(s) > 0 { + if s[0] == '\\' { + // escaped '\\' + if len(s) >= 2 && s[1] == '\\' { + result = append(result, '\\') + s = s[2:] + continue + } + + // '\\' followed by an octal number + if len(s) < 4 { + return nil, fmt.Errorf("invalid bytea sequence %v", s) + } + r, err := strconv.ParseUint(string(s[1:4]), 8, 8) + if err != nil { + return nil, fmt.Errorf("could not parse bytea value: %s", err.Error()) + } + result = append(result, byte(r)) + s = s[4:] + } else { + // We hit an unescaped, raw byte. Try to read in as many as + // possible in one go. + i := bytes.IndexByte(s, '\\') + if i == -1 { + result = append(result, s...) + break + } + result = append(result, s[:i]...) + s = s[i:] + } + } + } + + return result, nil +} + +func encodeBytea(serverVersion int, v []byte) (result []byte) { + if serverVersion >= 90000 { + // Use the hex format if we know that the server supports it + result = make([]byte, 2+hex.EncodedLen(len(v))) + result[0] = '\\' + result[1] = 'x' + hex.Encode(result[2:], v) + } else { + // .. or resort to "escape" + for _, b := range v { + if b == '\\' { + result = append(result, '\\', '\\') + } else if b < 0x20 || b > 0x7e { + result = append(result, []byte(fmt.Sprintf("\\%03o", b))...) + } else { + result = append(result, b) + } + } + } + + return result +} + +// NullTime represents a time.Time that may be null. NullTime implements the +// sql.Scanner interface so it can be used as a scan destination, similar to +// sql.NullString. +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// Scan implements the Scanner interface. +func (nt *NullTime) Scan(value interface{}) error { + nt.Time, nt.Valid = value.(time.Time) + return nil +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/vendor/github.com/lib/pq/error.go b/vendor/github.com/lib/pq/error.go new file mode 100644 index 0000000000..5cfe9c6e9f --- /dev/null +++ b/vendor/github.com/lib/pq/error.go @@ -0,0 +1,518 @@ +package pq + +import ( + "database/sql/driver" + "fmt" + "io" + "net" + "runtime" +) + +// Error severities +const ( + Efatal = "FATAL" + Epanic = "PANIC" + Ewarning = "WARNING" + Enotice = "NOTICE" + Edebug = "DEBUG" + Einfo = "INFO" + Elog = "LOG" +) + +// Error represents an error communicating with the server. +// +// See http://www.postgresql.org/docs/current/static/protocol-error-fields.html for details of the fields +type Error struct { + Severity string + Code ErrorCode + Message string + Detail string + Hint string + Position string + InternalPosition string + InternalQuery string + Where string + Schema string + Table string + Column string + DataTypeName string + Constraint string + File string + Line string + Routine string +} + +// ErrorCode is a five-character error code. +type ErrorCode string + +// Name returns a more human friendly rendering of the error code, namely the +// "condition name". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. 
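+//
+// For example, a failed unique constraint reports SQLSTATE 23505 (a sketch;
+// err is an error returned by a database/sql operation):
+//
+//	if pqErr, ok := err.(*pq.Error); ok {
+//		fmt.Println(pqErr.Code.Name())         // unique_violation
+//		fmt.Println(pqErr.Code.Class().Name()) // integrity_constraint_violation
+//	}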
+func (ec ErrorCode) Name() string { + return errorCodeNames[ec] +} + +// ErrorClass is only the class part of an error code. +type ErrorClass string + +// Name returns the condition name of an error class. It is equivalent to the +// condition name of the "standard" error code (i.e. the one having the last +// three characters "000"). +func (ec ErrorClass) Name() string { + return errorCodeNames[ErrorCode(ec+"000")] +} + +// Class returns the error class, e.g. "28". +// +// See http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html for +// details. +func (ec ErrorCode) Class() ErrorClass { + return ErrorClass(ec[0:2]) +} + +// errorCodeNames is a mapping between the five-character error codes and the +// human readable "condition names". It is derived from the list at +// http://www.postgresql.org/docs/9.3/static/errcodes-appendix.html +var errorCodeNames = map[ErrorCode]string{ + // Class 00 - Successful Completion + "00000": "successful_completion", + // Class 01 - Warning + "01000": "warning", + "0100C": "dynamic_result_sets_returned", + "01008": "implicit_zero_bit_padding", + "01003": "null_value_eliminated_in_set_function", + "01007": "privilege_not_granted", + "01006": "privilege_not_revoked", + "01004": "string_data_right_truncation", + "01P01": "deprecated_feature", + // Class 02 - No Data (this is also a warning class per the SQL standard) + "02000": "no_data", + "02001": "no_additional_dynamic_result_sets_returned", + // Class 03 - SQL Statement Not Yet Complete + "03000": "sql_statement_not_yet_complete", + // Class 08 - Connection Exception + "08000": "connection_exception", + "08003": "connection_does_not_exist", + "08006": "connection_failure", + "08001": "sqlclient_unable_to_establish_sqlconnection", + "08004": "sqlserver_rejected_establishment_of_sqlconnection", + "08007": "transaction_resolution_unknown", + "08P01": "protocol_violation", + // Class 09 - Triggered Action Exception + "09000": "triggered_action_exception", + // Class 0A - Feature Not Supported + "0A000": "feature_not_supported", + // Class 0B - Invalid Transaction Initiation + "0B000": "invalid_transaction_initiation", + // Class 0F - Locator Exception + "0F000": "locator_exception", + "0F001": "invalid_locator_specification", + // Class 0L - Invalid Grantor + "0L000": "invalid_grantor", + "0LP01": "invalid_grant_operation", + // Class 0P - Invalid Role Specification + "0P000": "invalid_role_specification", + // Class 0Z - Diagnostics Exception + "0Z000": "diagnostics_exception", + "0Z002": "stacked_diagnostics_accessed_without_active_handler", + // Class 20 - Case Not Found + "20000": "case_not_found", + // Class 21 - Cardinality Violation + "21000": "cardinality_violation", + // Class 22 - Data Exception + "22000": "data_exception", + "2202E": "array_subscript_error", + "22021": "character_not_in_repertoire", + "22008": "datetime_field_overflow", + "22012": "division_by_zero", + "22005": "error_in_assignment", + "2200B": "escape_character_conflict", + "22022": "indicator_overflow", + "22015": "interval_field_overflow", + "2201E": "invalid_argument_for_logarithm", + "22014": "invalid_argument_for_ntile_function", + "22016": "invalid_argument_for_nth_value_function", + "2201F": "invalid_argument_for_power_function", + "2201G": "invalid_argument_for_width_bucket_function", + "22018": "invalid_character_value_for_cast", + "22007": "invalid_datetime_format", + "22019": "invalid_escape_character", + "2200D": "invalid_escape_octet", + "22025": "invalid_escape_sequence", + "22P06": 
"nonstandard_use_of_escape_character", + "22010": "invalid_indicator_parameter_value", + "22023": "invalid_parameter_value", + "2201B": "invalid_regular_expression", + "2201W": "invalid_row_count_in_limit_clause", + "2201X": "invalid_row_count_in_result_offset_clause", + "22009": "invalid_time_zone_displacement_value", + "2200C": "invalid_use_of_escape_character", + "2200G": "most_specific_type_mismatch", + "22004": "null_value_not_allowed", + "22002": "null_value_no_indicator_parameter", + "22003": "numeric_value_out_of_range", + "2200H": "sequence_generator_limit_exceeded", + "22026": "string_data_length_mismatch", + "22001": "string_data_right_truncation", + "22011": "substring_error", + "22027": "trim_error", + "22024": "unterminated_c_string", + "2200F": "zero_length_character_string", + "22P01": "floating_point_exception", + "22P02": "invalid_text_representation", + "22P03": "invalid_binary_representation", + "22P04": "bad_copy_file_format", + "22P05": "untranslatable_character", + "2200L": "not_an_xml_document", + "2200M": "invalid_xml_document", + "2200N": "invalid_xml_content", + "2200S": "invalid_xml_comment", + "2200T": "invalid_xml_processing_instruction", + // Class 23 - Integrity Constraint Violation + "23000": "integrity_constraint_violation", + "23001": "restrict_violation", + "23502": "not_null_violation", + "23503": "foreign_key_violation", + "23505": "unique_violation", + "23514": "check_violation", + "23P01": "exclusion_violation", + // Class 24 - Invalid Cursor State + "24000": "invalid_cursor_state", + // Class 25 - Invalid Transaction State + "25000": "invalid_transaction_state", + "25001": "active_sql_transaction", + "25002": "branch_transaction_already_active", + "25008": "held_cursor_requires_same_isolation_level", + "25003": "inappropriate_access_mode_for_branch_transaction", + "25004": "inappropriate_isolation_level_for_branch_transaction", + "25005": "no_active_sql_transaction_for_branch_transaction", + "25006": "read_only_sql_transaction", + "25007": "schema_and_data_statement_mixing_not_supported", + "25P01": "no_active_sql_transaction", + "25P02": "in_failed_sql_transaction", + // Class 26 - Invalid SQL Statement Name + "26000": "invalid_sql_statement_name", + // Class 27 - Triggered Data Change Violation + "27000": "triggered_data_change_violation", + // Class 28 - Invalid Authorization Specification + "28000": "invalid_authorization_specification", + "28P01": "invalid_password", + // Class 2B - Dependent Privilege Descriptors Still Exist + "2B000": "dependent_privilege_descriptors_still_exist", + "2BP01": "dependent_objects_still_exist", + // Class 2D - Invalid Transaction Termination + "2D000": "invalid_transaction_termination", + // Class 2F - SQL Routine Exception + "2F000": "sql_routine_exception", + "2F005": "function_executed_no_return_statement", + "2F002": "modifying_sql_data_not_permitted", + "2F003": "prohibited_sql_statement_attempted", + "2F004": "reading_sql_data_not_permitted", + // Class 34 - Invalid Cursor Name + "34000": "invalid_cursor_name", + // Class 38 - External Routine Exception + "38000": "external_routine_exception", + "38001": "containing_sql_not_permitted", + "38002": "modifying_sql_data_not_permitted", + "38003": "prohibited_sql_statement_attempted", + "38004": "reading_sql_data_not_permitted", + // Class 39 - External Routine Invocation Exception + "39000": "external_routine_invocation_exception", + "39001": "invalid_sqlstate_returned", + "39004": "null_value_not_allowed", + "39P01": "trigger_protocol_violated", + "39P02": 
"srf_protocol_violated", + // Class 3B - Savepoint Exception + "3B000": "savepoint_exception", + "3B001": "invalid_savepoint_specification", + // Class 3D - Invalid Catalog Name + "3D000": "invalid_catalog_name", + // Class 3F - Invalid Schema Name + "3F000": "invalid_schema_name", + // Class 40 - Transaction Rollback + "40000": "transaction_rollback", + "40002": "transaction_integrity_constraint_violation", + "40001": "serialization_failure", + "40003": "statement_completion_unknown", + "40P01": "deadlock_detected", + // Class 42 - Syntax Error or Access Rule Violation + "42000": "syntax_error_or_access_rule_violation", + "42601": "syntax_error", + "42501": "insufficient_privilege", + "42846": "cannot_coerce", + "42803": "grouping_error", + "42P20": "windowing_error", + "42P19": "invalid_recursion", + "42830": "invalid_foreign_key", + "42602": "invalid_name", + "42622": "name_too_long", + "42939": "reserved_name", + "42804": "datatype_mismatch", + "42P18": "indeterminate_datatype", + "42P21": "collation_mismatch", + "42P22": "indeterminate_collation", + "42809": "wrong_object_type", + "42703": "undefined_column", + "42883": "undefined_function", + "42P01": "undefined_table", + "42P02": "undefined_parameter", + "42704": "undefined_object", + "42701": "duplicate_column", + "42P03": "duplicate_cursor", + "42P04": "duplicate_database", + "42723": "duplicate_function", + "42P05": "duplicate_prepared_statement", + "42P06": "duplicate_schema", + "42P07": "duplicate_table", + "42712": "duplicate_alias", + "42710": "duplicate_object", + "42702": "ambiguous_column", + "42725": "ambiguous_function", + "42P08": "ambiguous_parameter", + "42P09": "ambiguous_alias", + "42P10": "invalid_column_reference", + "42611": "invalid_column_definition", + "42P11": "invalid_cursor_definition", + "42P12": "invalid_database_definition", + "42P13": "invalid_function_definition", + "42P14": "invalid_prepared_statement_definition", + "42P15": "invalid_schema_definition", + "42P16": "invalid_table_definition", + "42P17": "invalid_object_definition", + // Class 44 - WITH CHECK OPTION Violation + "44000": "with_check_option_violation", + // Class 53 - Insufficient Resources + "53000": "insufficient_resources", + "53100": "disk_full", + "53200": "out_of_memory", + "53300": "too_many_connections", + "53400": "configuration_limit_exceeded", + // Class 54 - Program Limit Exceeded + "54000": "program_limit_exceeded", + "54001": "statement_too_complex", + "54011": "too_many_columns", + "54023": "too_many_arguments", + // Class 55 - Object Not In Prerequisite State + "55000": "object_not_in_prerequisite_state", + "55006": "object_in_use", + "55P02": "cant_change_runtime_param", + "55P03": "lock_not_available", + // Class 57 - Operator Intervention + "57000": "operator_intervention", + "57014": "query_canceled", + "57P01": "admin_shutdown", + "57P02": "crash_shutdown", + "57P03": "cannot_connect_now", + "57P04": "database_dropped", + // Class 58 - System Error (errors external to PostgreSQL itself) + "58000": "system_error", + "58030": "io_error", + "58P01": "undefined_file", + "58P02": "duplicate_file", + // Class F0 - Configuration File Error + "F0000": "config_file_error", + "F0001": "lock_file_exists", + // Class HV - Foreign Data Wrapper Error (SQL/MED) + "HV000": "fdw_error", + "HV005": "fdw_column_name_not_found", + "HV002": "fdw_dynamic_parameter_value_needed", + "HV010": "fdw_function_sequence_error", + "HV021": "fdw_inconsistent_descriptor_information", + "HV024": "fdw_invalid_attribute_value", + "HV007": 
"fdw_invalid_column_name", + "HV008": "fdw_invalid_column_number", + "HV004": "fdw_invalid_data_type", + "HV006": "fdw_invalid_data_type_descriptors", + "HV091": "fdw_invalid_descriptor_field_identifier", + "HV00B": "fdw_invalid_handle", + "HV00C": "fdw_invalid_option_index", + "HV00D": "fdw_invalid_option_name", + "HV090": "fdw_invalid_string_length_or_buffer_length", + "HV00A": "fdw_invalid_string_format", + "HV009": "fdw_invalid_use_of_null_pointer", + "HV014": "fdw_too_many_handles", + "HV001": "fdw_out_of_memory", + "HV00P": "fdw_no_schemas", + "HV00J": "fdw_option_name_not_found", + "HV00K": "fdw_reply_handle", + "HV00Q": "fdw_schema_not_found", + "HV00R": "fdw_table_not_found", + "HV00L": "fdw_unable_to_create_execution", + "HV00M": "fdw_unable_to_create_reply", + "HV00N": "fdw_unable_to_establish_connection", + // Class P0 - PL/pgSQL Error + "P0000": "plpgsql_error", + "P0001": "raise_exception", + "P0002": "no_data_found", + "P0003": "too_many_rows", + // Class XX - Internal Error + "XX000": "internal_error", + "XX001": "data_corrupted", + "XX002": "index_corrupted", +} + +func parseError(r *readBuf) *Error { + err := new(Error) + for t := r.byte(); t != 0; t = r.byte() { + msg := r.string() + switch t { + case 'S': + err.Severity = msg + case 'C': + err.Code = ErrorCode(msg) + case 'M': + err.Message = msg + case 'D': + err.Detail = msg + case 'H': + err.Hint = msg + case 'P': + err.Position = msg + case 'p': + err.InternalPosition = msg + case 'q': + err.InternalQuery = msg + case 'W': + err.Where = msg + case 's': + err.Schema = msg + case 't': + err.Table = msg + case 'c': + err.Column = msg + case 'd': + err.DataTypeName = msg + case 'n': + err.Constraint = msg + case 'F': + err.File = msg + case 'L': + err.Line = msg + case 'R': + err.Routine = msg + } + } + return err +} + +// Fatal returns true if the Error Severity is fatal. +func (err *Error) Fatal() bool { + return err.Severity == Efatal +} + +// Get implements the legacy PGError interface. New code should use the fields +// of the Error struct directly. +func (err *Error) Get(k byte) (v string) { + switch k { + case 'S': + return err.Severity + case 'C': + return string(err.Code) + case 'M': + return err.Message + case 'D': + return err.Detail + case 'H': + return err.Hint + case 'P': + return err.Position + case 'p': + return err.InternalPosition + case 'q': + return err.InternalQuery + case 'W': + return err.Where + case 's': + return err.Schema + case 't': + return err.Table + case 'c': + return err.Column + case 'd': + return err.DataTypeName + case 'n': + return err.Constraint + case 'F': + return err.File + case 'L': + return err.Line + case 'R': + return err.Routine + } + return "" +} + +func (err Error) Error() string { + return "pq: " + err.Message +} + +// PGError is an interface used by previous versions of pq. It is provided +// only to support legacy code. New code should use the Error type. +type PGError interface { + Error() string + Fatal() bool + Get(k byte) (v string) +} + +func errorf(s string, args ...interface{}) { + panic(fmt.Errorf("pq: %s", fmt.Sprintf(s, args...))) +} + +// TODO(ainar-g) Rename to errorf after removing panics. 
+func fmterrorf(s string, args ...interface{}) error { + return fmt.Errorf("pq: %s", fmt.Sprintf(s, args...)) +} + +func errRecoverNoErrBadConn(err *error) { + e := recover() + if e == nil { + // Do nothing + return + } + var ok bool + *err, ok = e.(error) + if !ok { + *err = fmt.Errorf("pq: unexpected error: %#v", e) + } +} + +func (cn *conn) errRecover(err *error) { + e := recover() + switch v := e.(type) { + case nil: + // Do nothing + case runtime.Error: + cn.err.set(driver.ErrBadConn) + panic(v) + case *Error: + if v.Fatal() { + *err = driver.ErrBadConn + } else { + *err = v + } + case *net.OpError: + cn.err.set(driver.ErrBadConn) + *err = v + case *safeRetryError: + cn.err.set(driver.ErrBadConn) + *err = driver.ErrBadConn + case error: + if v == io.EOF || v.Error() == "remote error: handshake failure" { + *err = driver.ErrBadConn + } else { + *err = v + } + + default: + cn.err.set(driver.ErrBadConn) + panic(fmt.Sprintf("unknown error: %#v", e)) + } + + // Any time we return ErrBadConn, we need to remember it since *Tx doesn't + // mark the connection bad in database/sql. + if *err == driver.ErrBadConn { + cn.err.set(driver.ErrBadConn) + } +} diff --git a/vendor/github.com/lib/pq/krb.go b/vendor/github.com/lib/pq/krb.go new file mode 100644 index 0000000000..408ec01f97 --- /dev/null +++ b/vendor/github.com/lib/pq/krb.go @@ -0,0 +1,27 @@ +package pq + +// NewGSSFunc creates a GSS authentication provider, for use with +// RegisterGSSProvider. +type NewGSSFunc func() (GSS, error) + +var newGss NewGSSFunc + +// RegisterGSSProvider registers a GSS authentication provider. For example, if +// you need to use Kerberos to authenticate with your server, add this to your +// main package: +// +// import "github.com/lib/pq/auth/kerberos" +// +// func init() { +// pq.RegisterGSSProvider(func() (pq.GSS, error) { return kerberos.NewGSS() }) +// } +func RegisterGSSProvider(newGssArg NewGSSFunc) { + newGss = newGssArg +} + +// GSS provides GSSAPI authentication (e.g., Kerberos). +type GSS interface { + GetInitToken(host string, service string) ([]byte, error) + GetInitTokenFromSpn(spn string) ([]byte, error) + Continue(inToken []byte) (done bool, outToken []byte, err error) +} diff --git a/vendor/github.com/lib/pq/notice.go b/vendor/github.com/lib/pq/notice.go new file mode 100644 index 0000000000..70ad122a7d --- /dev/null +++ b/vendor/github.com/lib/pq/notice.go @@ -0,0 +1,72 @@ +//go:build go1.10 +// +build go1.10 + +package pq + +import ( + "context" + "database/sql/driver" +) + +// NoticeHandler returns the notice handler on the given connection, if any. A +// runtime panic occurs if c is not a pq connection. This is rarely used +// directly, use ConnectorNoticeHandler and ConnectorWithNoticeHandler instead. +func NoticeHandler(c driver.Conn) func(*Error) { + return c.(*conn).noticeHandler +} + +// SetNoticeHandler sets the given notice handler on the given connection. A +// runtime panic occurs if c is not a pq connection. A nil handler may be used +// to unset it. This is rarely used directly, use ConnectorNoticeHandler and +// ConnectorWithNoticeHandler instead. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNoticeHandler(c driver.Conn, handler func(*Error)) { + c.(*conn).noticeHandler = handler +} + +// NoticeHandlerConnector wraps a regular connector and sets a notice handler +// on it. 
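A minimal sketch (editorial, not vendored code) wiring a notice handler through the connector wrapper defined next, which is the recommended path per the comments above. It assumes a reachable database; the DSN is hypothetical.

package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq"
)

func main() {
	base, err := pq.NewConnector("dbname=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	connector := pq.ConnectorWithNoticeHandler(base, func(notice *pq.Error) {
		fmt.Println("notice:", notice.Severity, notice.Message)
	})
	db := sql.OpenDB(connector)
	defer db.Close()

	// RAISE NOTICE from a DO block invokes the handler synchronously.
	if _, err := db.Exec(`DO $$ BEGIN RAISE NOTICE 'hello'; END $$`); err != nil {
		log.Fatal(err)
	}
}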
+type NoticeHandlerConnector struct { + driver.Connector + noticeHandler func(*Error) +} + +// Connect calls the underlying connector's connect method and then sets the +// notice handler. +func (n *NoticeHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNoticeHandler(c, n.noticeHandler) + } + return c, err +} + +// ConnectorNoticeHandler returns the currently set notice handler, if any. If +// the given connector is not a result of ConnectorWithNoticeHandler, nil is +// returned. +func ConnectorNoticeHandler(c driver.Connector) func(*Error) { + if c, ok := c.(*NoticeHandlerConnector); ok { + return c.noticeHandler + } + return nil +} + +// ConnectorWithNoticeHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notice +// handler. A nil notice handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notice handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNoticeHandler(c driver.Connector, handler func(*Error)) *NoticeHandlerConnector { + if c, ok := c.(*NoticeHandlerConnector); ok { + c.noticeHandler = handler + return c + } + return &NoticeHandlerConnector{Connector: c, noticeHandler: handler} +} diff --git a/vendor/github.com/lib/pq/notify.go b/vendor/github.com/lib/pq/notify.go new file mode 100644 index 0000000000..5c421fdb8b --- /dev/null +++ b/vendor/github.com/lib/pq/notify.go @@ -0,0 +1,858 @@ +package pq + +// Package pq is a pure Go Postgres driver for the database/sql package. +// This module contains support for Postgres LISTEN/NOTIFY. + +import ( + "context" + "database/sql/driver" + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// Notification represents a single notification from the database. +type Notification struct { + // Process ID (PID) of the notifying postgres backend. + BePid int + // Name of the channel the notification was sent on. + Channel string + // Payload, or the empty string if unspecified. + Extra string +} + +func recvNotification(r *readBuf) *Notification { + bePid := r.int32() + channel := r.string() + extra := r.string() + + return &Notification{bePid, channel, extra} +} + +// SetNotificationHandler sets the given notification handler on the given +// connection. A runtime panic occurs if c is not a pq connection. A nil handler +// may be used to unset it. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func SetNotificationHandler(c driver.Conn, handler func(*Notification)) { + c.(*conn).notificationHandler = handler +} + +// NotificationHandlerConnector wraps a regular connector and sets a notification handler +// on it. +type NotificationHandlerConnector struct { + driver.Connector + notificationHandler func(*Notification) +} + +// Connect calls the underlying connector's connect method and then sets the +// notification handler. 
+func (n *NotificationHandlerConnector) Connect(ctx context.Context) (driver.Conn, error) { + c, err := n.Connector.Connect(ctx) + if err == nil { + SetNotificationHandler(c, n.notificationHandler) + } + return c, err +} + +// ConnectorNotificationHandler returns the currently set notification handler, if any. If +// the given connector is not a result of ConnectorWithNotificationHandler, nil is +// returned. +func ConnectorNotificationHandler(c driver.Connector) func(*Notification) { + if c, ok := c.(*NotificationHandlerConnector); ok { + return c.notificationHandler + } + return nil +} + +// ConnectorWithNotificationHandler creates or sets the given handler for the given +// connector. If the given connector is a result of calling this function +// previously, it is simply set on the given connector and returned. Otherwise, +// this returns a new connector wrapping the given one and setting the notification +// handler. A nil notification handler may be used to unset it. +// +// The returned connector is intended to be used with database/sql.OpenDB. +// +// Note: Notification handlers are executed synchronously by pq meaning commands +// won't continue to be processed until the handler returns. +func ConnectorWithNotificationHandler(c driver.Connector, handler func(*Notification)) *NotificationHandlerConnector { + if c, ok := c.(*NotificationHandlerConnector); ok { + c.notificationHandler = handler + return c + } + return &NotificationHandlerConnector{Connector: c, notificationHandler: handler} +} + +const ( + connStateIdle int32 = iota + connStateExpectResponse + connStateExpectReadyForQuery +) + +type message struct { + typ byte + err error +} + +var errListenerConnClosed = errors.New("pq: ListenerConn has been closed") + +// ListenerConn is a low-level interface for waiting for notifications. You +// should use Listener instead. +type ListenerConn struct { + // guards cn and err + connectionLock sync.Mutex + cn *conn + err error + + connState int32 + + // the sending goroutine will be holding this lock + senderLock sync.Mutex + + notificationChan chan<- *Notification + + replyChan chan message +} + +// NewListenerConn creates a new ListenerConn. Use NewListener instead. +func NewListenerConn(name string, notificationChan chan<- *Notification) (*ListenerConn, error) { + return newDialListenerConn(defaultDialer{}, name, notificationChan) +} + +func newDialListenerConn(d Dialer, name string, c chan<- *Notification) (*ListenerConn, error) { + cn, err := DialOpen(d, name) + if err != nil { + return nil, err + } + + l := &ListenerConn{ + cn: cn.(*conn), + notificationChan: c, + connState: connStateIdle, + replyChan: make(chan message, 2), + } + + go l.listenerConnMain() + + return l, nil +} + +// We can only allow one goroutine at a time to be running a query on the +// connection for various reasons, so the goroutine sending on the connection +// must be holding senderLock. +// +// Returns an error if an unrecoverable error has occurred and the ListenerConn +// should be abandoned. +func (l *ListenerConn) acquireSenderLock() error { + // we must acquire senderLock first to avoid deadlocks; see ExecSimpleQuery + l.senderLock.Lock() + + l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + if err != nil { + l.senderLock.Unlock() + return err + } + return nil +} + +func (l *ListenerConn) releaseSenderLock() { + l.senderLock.Unlock() +} + +// setState advances the protocol state to newState. Returns false if moving +// to that state from the current state is not allowed. 
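The notification connector just defined mirrors the notice connector; a hedged sketch follows (not vendored code; the DSN and channel name mychan are hypothetical). For robust LISTEN/NOTIFY consumption, the Listener type defined later in this file is the intended API.

package main

import (
	"database/sql"
	"fmt"
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	base, err := pq.NewConnector("dbname=test sslmode=disable")
	if err != nil {
		log.Fatal(err)
	}
	connector := pq.ConnectorWithNotificationHandler(base, func(n *pq.Notification) {
		fmt.Println("notification on", n.Channel, "payload:", n.Extra)
	})
	db := sql.OpenDB(connector)
	defer db.Close()

	// Pin the pool to one connection so the LISTEN below stays on the
	// connection the handler is attached to.
	db.SetMaxOpenConns(1)
	if _, err := db.Exec("LISTEN mychan"); err != nil {
		log.Fatal(err)
	}
	// Notifications are dispatched while pq reads from the socket, so keep
	// issuing lightweight queries to give it a chance to read.
	for i := 0; i < 30; i++ {
		if err := db.Ping(); err != nil {
			log.Fatal(err)
		}
		time.Sleep(time.Second)
	}
}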
+func (l *ListenerConn) setState(newState int32) bool { + var expectedState int32 + + switch newState { + case connStateIdle: + expectedState = connStateExpectReadyForQuery + case connStateExpectResponse: + expectedState = connStateIdle + case connStateExpectReadyForQuery: + expectedState = connStateExpectResponse + default: + panic(fmt.Sprintf("unexpected listenerConnState %d", newState)) + } + + return atomic.CompareAndSwapInt32(&l.connState, expectedState, newState) +} + +// Main logic is here: receive messages from the postgres backend, forward +// notifications and query replies and keep the internal state in sync with the +// protocol state. Returns when the connection has been lost, is about to go +// away or should be discarded because we couldn't agree on the state with the +// server backend. +func (l *ListenerConn) listenerConnLoop() (err error) { + defer errRecoverNoErrBadConn(&err) + + r := &readBuf{} + for { + t, err := l.cn.recvMessage(r) + if err != nil { + return err + } + + switch t { + case 'A': + // recvNotification copies all the data so we don't need to worry + // about the scratch buffer being overwritten. + l.notificationChan <- recvNotification(r) + + case 'T', 'D': + // only used by tests; ignore + + case 'E': + // We might receive an ErrorResponse even when not in a query; it + // is expected that the server will close the connection after + // that, but we should make sure that the error we display is the + // one from the stray ErrorResponse, not io.ErrUnexpectedEOF. + if !l.setState(connStateExpectReadyForQuery) { + return parseError(r) + } + l.replyChan <- message{t, parseError(r)} + + case 'C', 'I': + if !l.setState(connStateExpectReadyForQuery) { + // protocol out of sync + return fmt.Errorf("unexpected CommandComplete") + } + // ExecSimpleQuery doesn't need to know about this message + + case 'Z': + if !l.setState(connStateIdle) { + // protocol out of sync + return fmt.Errorf("unexpected ReadyForQuery") + } + l.replyChan <- message{t, nil} + + case 'S': + // ignore + case 'N': + if n := l.cn.noticeHandler; n != nil { + n(parseError(r)) + } + default: + return fmt.Errorf("unexpected message %q from server in listenerConnLoop", t) + } + } +} + +// This is the main routine for the goroutine receiving on the database +// connection. Most of the main logic is in listenerConnLoop. +func (l *ListenerConn) listenerConnMain() { + err := l.listenerConnLoop() + + // listenerConnLoop terminated; we're done, but we still have to clean up. + // Make sure nobody tries to start any new queries by making sure the err + // pointer is set. It is important that we do not overwrite its value; a + // connection could be closed by either this goroutine or one sending on + // the connection -- whoever closes the connection is assumed to have the + // more meaningful error message (as the other one will probably get + // net.errClosed), so that goroutine sets the error we expose while the + // other error is discarded. If the connection is lost while two + // goroutines are operating on the socket, it probably doesn't matter which + // error we expose so we don't try to do anything more complex. + l.connectionLock.Lock() + if l.err == nil { + l.err = err + } + l.cn.Close() + l.connectionLock.Unlock() + + // There might be a query in-flight; make sure nobody's waiting for a + // response to it, since there's not going to be one. 
+	close(l.replyChan)
+
+	// let the listener know we're done
+	close(l.notificationChan)
+
+	// this ListenerConn is done
+}
+
+// Listen sends a LISTEN query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) Listen(channel string) (bool, error) {
+	return l.ExecSimpleQuery("LISTEN " + QuoteIdentifier(channel))
+}
+
+// Unlisten sends an UNLISTEN query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) Unlisten(channel string) (bool, error) {
+	return l.ExecSimpleQuery("UNLISTEN " + QuoteIdentifier(channel))
+}
+
+// UnlistenAll sends an `UNLISTEN *` query to the server. See ExecSimpleQuery.
+func (l *ListenerConn) UnlistenAll() (bool, error) {
+	return l.ExecSimpleQuery("UNLISTEN *")
+}
+
+// Ping the remote server to make sure it's alive. A non-nil error means the
+// connection has failed and should be abandoned.
+func (l *ListenerConn) Ping() error {
+	sent, err := l.ExecSimpleQuery("")
+	if !sent {
+		return err
+	}
+	if err != nil {
+		// shouldn't happen
+		panic(err)
+	}
+	return nil
+}
+
+// Attempt to send a query on the connection. Returns an error if sending the
+// query failed, in which case the caller should initiate closure of this
+// connection. The caller must be holding senderLock (see acquireSenderLock
+// and releaseSenderLock).
+func (l *ListenerConn) sendSimpleQuery(q string) (err error) {
+	defer errRecoverNoErrBadConn(&err)
+
+	// must set connection state before sending the query
+	if !l.setState(connStateExpectResponse) {
+		panic("two queries running at the same time")
+	}
+
+	// Can't use l.cn.writeBuf here because it uses the scratch buffer which
+	// might get overwritten by listenerConnLoop.
+	b := &writeBuf{
+		buf: []byte("Q\x00\x00\x00\x00"),
+		pos: 1,
+	}
+	b.string(q)
+	l.cn.send(b)
+
+	return nil
+}
+
+// ExecSimpleQuery executes a "simple query" (i.e. one with no bindable
+// parameters) on the connection. The possible return values are:
+//	1) "executed" is true; the query was executed to completion on the
+//	   database server. If the query failed, err will be set to the error
+//	   returned by the database, otherwise err will be nil.
+//	2) If "executed" is false, the query could not be executed on the remote
+//	   server. err will be non-nil.
+//
+// After a call to ExecSimpleQuery has returned an executed=false value, the
+// connection has either been closed or will be closed shortly thereafter, and
+// all subsequently executed queries will return an error.
+func (l *ListenerConn) ExecSimpleQuery(q string) (executed bool, err error) {
+	if err = l.acquireSenderLock(); err != nil {
+		return false, err
+	}
+	defer l.releaseSenderLock()
+
+	err = l.sendSimpleQuery(q)
+	if err != nil {
+		// We can't know what state the protocol is in, so we need to abandon
+		// this connection.
+		l.connectionLock.Lock()
+		// Set the error pointer if it hasn't been set already; see
+		// listenerConnMain.
+		if l.err == nil {
+			l.err = err
+		}
+		l.connectionLock.Unlock()
+		l.cn.c.Close()
+		return false, err
+	}
+
+	// now we just wait for a reply...
+	for {
+		m, ok := <-l.replyChan
+		if !ok {
+			// We lost the connection to the server, so don't bother waiting
+			// for a response. err should have been set already.
+ l.connectionLock.Lock() + err := l.err + l.connectionLock.Unlock() + return false, err + } + switch m.typ { + case 'Z': + // sanity check + if m.err != nil { + panic("m.err != nil") + } + // done; err might or might not be set + return true, err + + case 'E': + // sanity check + if m.err == nil { + panic("m.err == nil") + } + // server responded with an error; ReadyForQuery to follow + err = m.err + + default: + return false, fmt.Errorf("unknown response for simple query: %q", m.typ) + } + } +} + +// Close closes the connection. +func (l *ListenerConn) Close() error { + l.connectionLock.Lock() + if l.err != nil { + l.connectionLock.Unlock() + return errListenerConnClosed + } + l.err = errListenerConnClosed + l.connectionLock.Unlock() + // We can't send anything on the connection without holding senderLock. + // Simply close the net.Conn to wake up everyone operating on it. + return l.cn.c.Close() +} + +// Err returns the reason the connection was closed. It is not safe to call +// this function until l.Notify has been closed. +func (l *ListenerConn) Err() error { + return l.err +} + +var errListenerClosed = errors.New("pq: Listener has been closed") + +// ErrChannelAlreadyOpen is returned from Listen when a channel is already +// open. +var ErrChannelAlreadyOpen = errors.New("pq: channel is already open") + +// ErrChannelNotOpen is returned from Unlisten when a channel is not open. +var ErrChannelNotOpen = errors.New("pq: channel is not open") + +// ListenerEventType is an enumeration of listener event types. +type ListenerEventType int + +const ( + // ListenerEventConnected is emitted only when the database connection + // has been initially initialized. The err argument of the callback + // will always be nil. + ListenerEventConnected ListenerEventType = iota + + // ListenerEventDisconnected is emitted after a database connection has + // been lost, either because of an error or because Close has been + // called. The err argument will be set to the reason the database + // connection was lost. + ListenerEventDisconnected + + // ListenerEventReconnected is emitted after a database connection has + // been re-established after connection loss. The err argument of the + // callback will always be nil. After this event has been emitted, a + // nil pq.Notification is sent on the Listener.Notify channel. + ListenerEventReconnected + + // ListenerEventConnectionAttemptFailed is emitted after a connection + // to the database was attempted, but failed. The err argument will be + // set to an error describing why the connection attempt did not + // succeed. + ListenerEventConnectionAttemptFailed +) + +// EventCallbackType is the event callback type. See also ListenerEventType +// constants' documentation. +type EventCallbackType func(event ListenerEventType, err error) + +// Listener provides an interface for listening to notifications from a +// PostgreSQL database. For general usage information, see section +// "Notifications". +// +// Listener can safely be used from concurrently running goroutines. +type Listener struct { + // Channel for receiving notifications from the database. In some cases a + // nil value will be sent. See section "Notifications" above. 
+	Notify chan *Notification
+
+	name                 string
+	minReconnectInterval time.Duration
+	maxReconnectInterval time.Duration
+	dialer               Dialer
+	eventCallback        EventCallbackType
+
+	lock                 sync.Mutex
+	isClosed             bool
+	reconnectCond        *sync.Cond
+	cn                   *ListenerConn
+	connNotificationChan <-chan *Notification
+	channels             map[string]struct{}
+}
+
+// NewListener creates a new database connection dedicated to LISTEN / NOTIFY.
+//
+// name should be set to a connection string to be used to establish the
+// database connection (see section "Connection String Parameters" above).
+//
+// minReconnectInterval controls the duration to wait before trying to
+// re-establish the database connection after connection loss. After each
+// consecutive failure this interval is doubled, until maxReconnectInterval is
+// reached. Successfully completing the connection establishment procedure
+// resets the interval back to minReconnectInterval.
+//
+// The last parameter eventCallback can be set to a function which will be
+// called by the Listener when the state of the underlying database connection
+// changes. This callback will be called by the goroutine which dispatches the
+// notifications over the Notify channel, so you should try to avoid doing
+// potentially time-consuming operations from the callback.
+func NewListener(name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+	return NewDialListener(defaultDialer{}, name, minReconnectInterval, maxReconnectInterval, eventCallback)
+}
+
+// NewDialListener is like NewListener but it takes a Dialer.
+func NewDialListener(d Dialer,
+	name string,
+	minReconnectInterval time.Duration,
+	maxReconnectInterval time.Duration,
+	eventCallback EventCallbackType) *Listener {
+
+	l := &Listener{
+		name:                 name,
+		minReconnectInterval: minReconnectInterval,
+		maxReconnectInterval: maxReconnectInterval,
+		dialer:               d,
+		eventCallback:        eventCallback,
+
+		channels: make(map[string]struct{}),
+
+		Notify: make(chan *Notification, 32),
+	}
+	l.reconnectCond = sync.NewCond(&l.lock)
+
+	go l.listenerMain()
+
+	return l
+}
+
+// NotificationChannel returns the notification channel for this listener.
+// This is the same channel as Notify, and will not be recreated during the
+// lifetime of the Listener.
+func (l *Listener) NotificationChannel() <-chan *Notification {
+	return l.Notify
+}
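An end-to-end sketch of the Listener API (editorial, not vendored code); the connection string and the channel name mychan are hypothetical. Note the nil notification sent after a reconnect, as documented on ListenerEventReconnected above.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/lib/pq"
)

func main() {
	reportEvent := func(ev pq.ListenerEventType, err error) {
		if err != nil {
			log.Println("listener event", ev, "error:", err)
		}
	}
	listener := pq.NewListener("dbname=test sslmode=disable",
		10*time.Second, time.Minute, reportEvent)
	if err := listener.Listen("mychan"); err != nil {
		log.Fatal(err)
	}
	for n := range listener.Notify {
		if n == nil {
			// Sent after a successful reconnect; notifications may have
			// been lost while the connection was down, so re-check state.
			fmt.Println("reconnected; re-check application state")
			continue
		}
		fmt.Println("got notification on", n.Channel, "payload:", n.Extra)
	}
}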
+
+// Listen starts listening for notifications on a channel. Calls to this
+// function will block until an acknowledgement has been received from the
+// server. Note that Listener automatically re-establishes the connection
+// after connection loss, so this function may block indefinitely if the
+// connection cannot be re-established.
+//
+// Listen will only fail in three conditions:
+//	1) The channel is already open. The returned error will be
+//	   ErrChannelAlreadyOpen.
+//	2) The query was executed on the remote server, but PostgreSQL returned an
+//	   error message in response to the query. The returned error will be a
+//	   pq.Error containing the information the server supplied.
+//	3) Close is called on the Listener before the request could be completed.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Listen(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// The server allows you to issue a LISTEN on a channel which is already
+	// open, but it seems useful to be able to detect this case and spot
+	// mistakes in application logic. If the application genuinely doesn't
+	// care, it can check the exported error and ignore it.
+	_, exists := l.channels[channel]
+	if exists {
+		return ErrChannelAlreadyOpen
+	}
+
+	if l.cn != nil {
+		// If gotResponse is true but error is set, the query was executed on
+		// the remote server, but resulted in an error. This should be
+		// relatively rare, so it's fine if we just pass the error to our
+		// caller. However, if gotResponse is false, we could not complete the
+		// query on the remote server and our underlying connection is about
+		// to go away, so we only add relname to l.channels, and wait for
+		// resync() to take care of the rest.
+		gotResponse, err := l.cn.Listen(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	l.channels[channel] = struct{}{}
+	for l.cn == nil {
+		l.reconnectCond.Wait()
+		// we let go of the mutex for a while
+		if l.isClosed {
+			return errListenerClosed
+		}
+	}
+
+	return nil
+}
+
+// Unlisten removes a channel from the Listener's channel list. Returns
+// ErrChannelNotOpen if the Listener is not listening on the specified channel.
+// Returns immediately with no error if there is no connection. Note that you
+// might still get notifications for this channel even after Unlisten has
+// returned.
+//
+// The channel name is case-sensitive.
+func (l *Listener) Unlisten(channel string) error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	// Similarly to LISTEN, this is not an error in Postgres, but it seems
+	// useful to distinguish from the normal conditions.
+	_, exists := l.channels[channel]
+	if !exists {
+		return ErrChannelNotOpen
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.Unlisten(channel)
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	delete(l.channels, channel)
+	return nil
+}
+
+// UnlistenAll removes all channels from the Listener's channel list. Returns
+// immediately with no error if there is no connection. Note that you might
+// still get notifications for any of the deleted channels even after
+// UnlistenAll has returned.
+func (l *Listener) UnlistenAll() error {
+	l.lock.Lock()
+	defer l.lock.Unlock()
+
+	if l.isClosed {
+		return errListenerClosed
+	}
+
+	if l.cn != nil {
+		// Similarly to Listen (see comment in that function), the caller
+		// should only be bothered with an error if it came from the backend as
+		// a response to our query.
+		gotResponse, err := l.cn.UnlistenAll()
+		if gotResponse && err != nil {
+			return err
+		}
+	}
+
+	// Don't bother waiting for resync if there's no connection.
+	l.channels = make(map[string]struct{})
+	return nil
+}
+
+// Ping the remote server to make sure it's alive. A non-nil return value
+// means that there is no active connection.
+func (l *Listener) Ping() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + if l.cn == nil { + return errors.New("no connection") + } + + return l.cn.Ping() +} + +// Clean up after losing the server connection. Returns l.cn.Err(), which +// should have the reason the connection was lost. +func (l *Listener) disconnectCleanup() error { + l.lock.Lock() + defer l.lock.Unlock() + + // sanity check; can't look at Err() until the channel has been closed + select { + case _, ok := <-l.connNotificationChan: + if ok { + panic("connNotificationChan not closed") + } + default: + panic("connNotificationChan not closed") + } + + err := l.cn.Err() + l.cn.Close() + l.cn = nil + return err +} + +// Synchronize the list of channels we want to be listening on with the server +// after the connection has been established. +func (l *Listener) resync(cn *ListenerConn, notificationChan <-chan *Notification) error { + doneChan := make(chan error) + go func(notificationChan <-chan *Notification) { + for channel := range l.channels { + // If we got a response, return that error to our caller as it's + // going to be more descriptive than cn.Err(). + gotResponse, err := cn.Listen(channel) + if gotResponse && err != nil { + doneChan <- err + return + } + + // If we couldn't reach the server, wait for notificationChan to + // close and then return the error message from the connection, as + // per ListenerConn's interface. + if err != nil { + for range notificationChan { + } + doneChan <- cn.Err() + return + } + } + doneChan <- nil + }(notificationChan) + + // Ignore notifications while synchronization is going on to avoid + // deadlocks. We have to send a nil notification over Notify anyway as + // we can't possibly know which notifications (if any) were lost while + // the connection was down, so there's no reason to try and process + // these messages at all. + for { + select { + case _, ok := <-notificationChan: + if !ok { + notificationChan = nil + } + + case err := <-doneChan: + return err + } + } +} + +// caller should NOT be holding l.lock +func (l *Listener) closed() bool { + l.lock.Lock() + defer l.lock.Unlock() + + return l.isClosed +} + +func (l *Listener) connect() error { + notificationChan := make(chan *Notification, 32) + cn, err := newDialListenerConn(l.dialer, l.name, notificationChan) + if err != nil { + return err + } + + l.lock.Lock() + defer l.lock.Unlock() + + err = l.resync(cn, notificationChan) + if err != nil { + cn.Close() + return err + } + + l.cn = cn + l.connNotificationChan = notificationChan + l.reconnectCond.Broadcast() + + return nil +} + +// Close disconnects the Listener from the database and shuts it down. +// Subsequent calls to its methods will return an error. Close returns an +// error if the connection has already been closed. +func (l *Listener) Close() error { + l.lock.Lock() + defer l.lock.Unlock() + + if l.isClosed { + return errListenerClosed + } + + if l.cn != nil { + l.cn.Close() + } + l.isClosed = true + + // Unblock calls to Listen() + l.reconnectCond.Broadcast() + + return nil +} + +func (l *Listener) emitEvent(event ListenerEventType, err error) { + if l.eventCallback != nil { + l.eventCallback(event, err) + } +} + +// Main logic here: maintain a connection to the server when possible, wait +// for notifications and emit events. 
+func (l *Listener) listenerConnLoop() { + var nextReconnect time.Time + + reconnectInterval := l.minReconnectInterval + for { + for { + err := l.connect() + if err == nil { + break + } + + if l.closed() { + return + } + l.emitEvent(ListenerEventConnectionAttemptFailed, err) + + time.Sleep(reconnectInterval) + reconnectInterval *= 2 + if reconnectInterval > l.maxReconnectInterval { + reconnectInterval = l.maxReconnectInterval + } + } + + if nextReconnect.IsZero() { + l.emitEvent(ListenerEventConnected, nil) + } else { + l.emitEvent(ListenerEventReconnected, nil) + l.Notify <- nil + } + + reconnectInterval = l.minReconnectInterval + nextReconnect = time.Now().Add(reconnectInterval) + + for { + notification, ok := <-l.connNotificationChan + if !ok { + // lost connection, loop again + break + } + l.Notify <- notification + } + + err := l.disconnectCleanup() + if l.closed() { + return + } + l.emitEvent(ListenerEventDisconnected, err) + + time.Sleep(time.Until(nextReconnect)) + } +} + +func (l *Listener) listenerMain() { + l.listenerConnLoop() + close(l.Notify) +} diff --git a/vendor/github.com/lib/pq/oid/doc.go b/vendor/github.com/lib/pq/oid/doc.go new file mode 100644 index 0000000000..caaede2489 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/doc.go @@ -0,0 +1,6 @@ +// Package oid contains OID constants +// as defined by the Postgres server. +package oid + +// Oid is a Postgres Object ID. +type Oid uint32 diff --git a/vendor/github.com/lib/pq/oid/types.go b/vendor/github.com/lib/pq/oid/types.go new file mode 100644 index 0000000000..ecc84c2c86 --- /dev/null +++ b/vendor/github.com/lib/pq/oid/types.go @@ -0,0 +1,343 @@ +// Code generated by gen.go. DO NOT EDIT. + +package oid + +const ( + T_bool Oid = 16 + T_bytea Oid = 17 + T_char Oid = 18 + T_name Oid = 19 + T_int8 Oid = 20 + T_int2 Oid = 21 + T_int2vector Oid = 22 + T_int4 Oid = 23 + T_regproc Oid = 24 + T_text Oid = 25 + T_oid Oid = 26 + T_tid Oid = 27 + T_xid Oid = 28 + T_cid Oid = 29 + T_oidvector Oid = 30 + T_pg_ddl_command Oid = 32 + T_pg_type Oid = 71 + T_pg_attribute Oid = 75 + T_pg_proc Oid = 81 + T_pg_class Oid = 83 + T_json Oid = 114 + T_xml Oid = 142 + T__xml Oid = 143 + T_pg_node_tree Oid = 194 + T__json Oid = 199 + T_smgr Oid = 210 + T_index_am_handler Oid = 325 + T_point Oid = 600 + T_lseg Oid = 601 + T_path Oid = 602 + T_box Oid = 603 + T_polygon Oid = 604 + T_line Oid = 628 + T__line Oid = 629 + T_cidr Oid = 650 + T__cidr Oid = 651 + T_float4 Oid = 700 + T_float8 Oid = 701 + T_abstime Oid = 702 + T_reltime Oid = 703 + T_tinterval Oid = 704 + T_unknown Oid = 705 + T_circle Oid = 718 + T__circle Oid = 719 + T_money Oid = 790 + T__money Oid = 791 + T_macaddr Oid = 829 + T_inet Oid = 869 + T__bool Oid = 1000 + T__bytea Oid = 1001 + T__char Oid = 1002 + T__name Oid = 1003 + T__int2 Oid = 1005 + T__int2vector Oid = 1006 + T__int4 Oid = 1007 + T__regproc Oid = 1008 + T__text Oid = 1009 + T__tid Oid = 1010 + T__xid Oid = 1011 + T__cid Oid = 1012 + T__oidvector Oid = 1013 + T__bpchar Oid = 1014 + T__varchar Oid = 1015 + T__int8 Oid = 1016 + T__point Oid = 1017 + T__lseg Oid = 1018 + T__path Oid = 1019 + T__box Oid = 1020 + T__float4 Oid = 1021 + T__float8 Oid = 1022 + T__abstime Oid = 1023 + T__reltime Oid = 1024 + T__tinterval Oid = 1025 + T__polygon Oid = 1027 + T__oid Oid = 1028 + T_aclitem Oid = 1033 + T__aclitem Oid = 1034 + T__macaddr Oid = 1040 + T__inet Oid = 1041 + T_bpchar Oid = 1042 + T_varchar Oid = 1043 + T_date Oid = 1082 + T_time Oid = 1083 + T_timestamp Oid = 1114 + 
T__timestamp Oid = 1115 + T__date Oid = 1182 + T__time Oid = 1183 + T_timestamptz Oid = 1184 + T__timestamptz Oid = 1185 + T_interval Oid = 1186 + T__interval Oid = 1187 + T__numeric Oid = 1231 + T_pg_database Oid = 1248 + T__cstring Oid = 1263 + T_timetz Oid = 1266 + T__timetz Oid = 1270 + T_bit Oid = 1560 + T__bit Oid = 1561 + T_varbit Oid = 1562 + T__varbit Oid = 1563 + T_numeric Oid = 1700 + T_refcursor Oid = 1790 + T__refcursor Oid = 2201 + T_regprocedure Oid = 2202 + T_regoper Oid = 2203 + T_regoperator Oid = 2204 + T_regclass Oid = 2205 + T_regtype Oid = 2206 + T__regprocedure Oid = 2207 + T__regoper Oid = 2208 + T__regoperator Oid = 2209 + T__regclass Oid = 2210 + T__regtype Oid = 2211 + T_record Oid = 2249 + T_cstring Oid = 2275 + T_any Oid = 2276 + T_anyarray Oid = 2277 + T_void Oid = 2278 + T_trigger Oid = 2279 + T_language_handler Oid = 2280 + T_internal Oid = 2281 + T_opaque Oid = 2282 + T_anyelement Oid = 2283 + T__record Oid = 2287 + T_anynonarray Oid = 2776 + T_pg_authid Oid = 2842 + T_pg_auth_members Oid = 2843 + T__txid_snapshot Oid = 2949 + T_uuid Oid = 2950 + T__uuid Oid = 2951 + T_txid_snapshot Oid = 2970 + T_fdw_handler Oid = 3115 + T_pg_lsn Oid = 3220 + T__pg_lsn Oid = 3221 + T_tsm_handler Oid = 3310 + T_anyenum Oid = 3500 + T_tsvector Oid = 3614 + T_tsquery Oid = 3615 + T_gtsvector Oid = 3642 + T__tsvector Oid = 3643 + T__gtsvector Oid = 3644 + T__tsquery Oid = 3645 + T_regconfig Oid = 3734 + T__regconfig Oid = 3735 + T_regdictionary Oid = 3769 + T__regdictionary Oid = 3770 + T_jsonb Oid = 3802 + T__jsonb Oid = 3807 + T_anyrange Oid = 3831 + T_event_trigger Oid = 3838 + T_int4range Oid = 3904 + T__int4range Oid = 3905 + T_numrange Oid = 3906 + T__numrange Oid = 3907 + T_tsrange Oid = 3908 + T__tsrange Oid = 3909 + T_tstzrange Oid = 3910 + T__tstzrange Oid = 3911 + T_daterange Oid = 3912 + T__daterange Oid = 3913 + T_int8range Oid = 3926 + T__int8range Oid = 3927 + T_pg_shseclabel Oid = 4066 + T_regnamespace Oid = 4089 + T__regnamespace Oid = 4090 + T_regrole Oid = 4096 + T__regrole Oid = 4097 +) + +var TypeName = map[Oid]string{ + T_bool: "BOOL", + T_bytea: "BYTEA", + T_char: "CHAR", + T_name: "NAME", + T_int8: "INT8", + T_int2: "INT2", + T_int2vector: "INT2VECTOR", + T_int4: "INT4", + T_regproc: "REGPROC", + T_text: "TEXT", + T_oid: "OID", + T_tid: "TID", + T_xid: "XID", + T_cid: "CID", + T_oidvector: "OIDVECTOR", + T_pg_ddl_command: "PG_DDL_COMMAND", + T_pg_type: "PG_TYPE", + T_pg_attribute: "PG_ATTRIBUTE", + T_pg_proc: "PG_PROC", + T_pg_class: "PG_CLASS", + T_json: "JSON", + T_xml: "XML", + T__xml: "_XML", + T_pg_node_tree: "PG_NODE_TREE", + T__json: "_JSON", + T_smgr: "SMGR", + T_index_am_handler: "INDEX_AM_HANDLER", + T_point: "POINT", + T_lseg: "LSEG", + T_path: "PATH", + T_box: "BOX", + T_polygon: "POLYGON", + T_line: "LINE", + T__line: "_LINE", + T_cidr: "CIDR", + T__cidr: "_CIDR", + T_float4: "FLOAT4", + T_float8: "FLOAT8", + T_abstime: "ABSTIME", + T_reltime: "RELTIME", + T_tinterval: "TINTERVAL", + T_unknown: "UNKNOWN", + T_circle: "CIRCLE", + T__circle: "_CIRCLE", + T_money: "MONEY", + T__money: "_MONEY", + T_macaddr: "MACADDR", + T_inet: "INET", + T__bool: "_BOOL", + T__bytea: "_BYTEA", + T__char: "_CHAR", + T__name: "_NAME", + T__int2: "_INT2", + T__int2vector: "_INT2VECTOR", + T__int4: "_INT4", + T__regproc: "_REGPROC", + T__text: "_TEXT", + T__tid: "_TID", + T__xid: "_XID", + T__cid: "_CID", + T__oidvector: "_OIDVECTOR", + T__bpchar: "_BPCHAR", + T__varchar: "_VARCHAR", + T__int8: "_INT8", + T__point: "_POINT", + T__lseg: "_LSEG", + T__path: "_PATH", 
+ T__box: "_BOX", + T__float4: "_FLOAT4", + T__float8: "_FLOAT8", + T__abstime: "_ABSTIME", + T__reltime: "_RELTIME", + T__tinterval: "_TINTERVAL", + T__polygon: "_POLYGON", + T__oid: "_OID", + T_aclitem: "ACLITEM", + T__aclitem: "_ACLITEM", + T__macaddr: "_MACADDR", + T__inet: "_INET", + T_bpchar: "BPCHAR", + T_varchar: "VARCHAR", + T_date: "DATE", + T_time: "TIME", + T_timestamp: "TIMESTAMP", + T__timestamp: "_TIMESTAMP", + T__date: "_DATE", + T__time: "_TIME", + T_timestamptz: "TIMESTAMPTZ", + T__timestamptz: "_TIMESTAMPTZ", + T_interval: "INTERVAL", + T__interval: "_INTERVAL", + T__numeric: "_NUMERIC", + T_pg_database: "PG_DATABASE", + T__cstring: "_CSTRING", + T_timetz: "TIMETZ", + T__timetz: "_TIMETZ", + T_bit: "BIT", + T__bit: "_BIT", + T_varbit: "VARBIT", + T__varbit: "_VARBIT", + T_numeric: "NUMERIC", + T_refcursor: "REFCURSOR", + T__refcursor: "_REFCURSOR", + T_regprocedure: "REGPROCEDURE", + T_regoper: "REGOPER", + T_regoperator: "REGOPERATOR", + T_regclass: "REGCLASS", + T_regtype: "REGTYPE", + T__regprocedure: "_REGPROCEDURE", + T__regoper: "_REGOPER", + T__regoperator: "_REGOPERATOR", + T__regclass: "_REGCLASS", + T__regtype: "_REGTYPE", + T_record: "RECORD", + T_cstring: "CSTRING", + T_any: "ANY", + T_anyarray: "ANYARRAY", + T_void: "VOID", + T_trigger: "TRIGGER", + T_language_handler: "LANGUAGE_HANDLER", + T_internal: "INTERNAL", + T_opaque: "OPAQUE", + T_anyelement: "ANYELEMENT", + T__record: "_RECORD", + T_anynonarray: "ANYNONARRAY", + T_pg_authid: "PG_AUTHID", + T_pg_auth_members: "PG_AUTH_MEMBERS", + T__txid_snapshot: "_TXID_SNAPSHOT", + T_uuid: "UUID", + T__uuid: "_UUID", + T_txid_snapshot: "TXID_SNAPSHOT", + T_fdw_handler: "FDW_HANDLER", + T_pg_lsn: "PG_LSN", + T__pg_lsn: "_PG_LSN", + T_tsm_handler: "TSM_HANDLER", + T_anyenum: "ANYENUM", + T_tsvector: "TSVECTOR", + T_tsquery: "TSQUERY", + T_gtsvector: "GTSVECTOR", + T__tsvector: "_TSVECTOR", + T__gtsvector: "_GTSVECTOR", + T__tsquery: "_TSQUERY", + T_regconfig: "REGCONFIG", + T__regconfig: "_REGCONFIG", + T_regdictionary: "REGDICTIONARY", + T__regdictionary: "_REGDICTIONARY", + T_jsonb: "JSONB", + T__jsonb: "_JSONB", + T_anyrange: "ANYRANGE", + T_event_trigger: "EVENT_TRIGGER", + T_int4range: "INT4RANGE", + T__int4range: "_INT4RANGE", + T_numrange: "NUMRANGE", + T__numrange: "_NUMRANGE", + T_tsrange: "TSRANGE", + T__tsrange: "_TSRANGE", + T_tstzrange: "TSTZRANGE", + T__tstzrange: "_TSTZRANGE", + T_daterange: "DATERANGE", + T__daterange: "_DATERANGE", + T_int8range: "INT8RANGE", + T__int8range: "_INT8RANGE", + T_pg_shseclabel: "PG_SHSECLABEL", + T_regnamespace: "REGNAMESPACE", + T__regnamespace: "_REGNAMESPACE", + T_regrole: "REGROLE", + T__regrole: "_REGROLE", +} diff --git a/vendor/github.com/lib/pq/rows.go b/vendor/github.com/lib/pq/rows.go new file mode 100644 index 0000000000..c6aa5b9a36 --- /dev/null +++ b/vendor/github.com/lib/pq/rows.go @@ -0,0 +1,93 @@ +package pq + +import ( + "math" + "reflect" + "time" + + "github.com/lib/pq/oid" +) + +const headerSize = 4 + +type fieldDesc struct { + // The object ID of the data type. + OID oid.Oid + // The data type size (see pg_type.typlen). + // Note that negative values denote variable-width types. + Len int + // The type modifier (see pg_attribute.atttypmod). + // The meaning of the modifier is type-specific. 
+ Mod int +} + +func (fd fieldDesc) Type() reflect.Type { + switch fd.OID { + case oid.T_int8: + return reflect.TypeOf(int64(0)) + case oid.T_int4: + return reflect.TypeOf(int32(0)) + case oid.T_int2: + return reflect.TypeOf(int16(0)) + case oid.T_varchar, oid.T_text: + return reflect.TypeOf("") + case oid.T_bool: + return reflect.TypeOf(false) + case oid.T_date, oid.T_time, oid.T_timetz, oid.T_timestamp, oid.T_timestamptz: + return reflect.TypeOf(time.Time{}) + case oid.T_bytea: + return reflect.TypeOf([]byte(nil)) + default: + return reflect.TypeOf(new(interface{})).Elem() + } +} + +func (fd fieldDesc) Name() string { + return oid.TypeName[fd.OID] +} + +func (fd fieldDesc) Length() (length int64, ok bool) { + switch fd.OID { + case oid.T_text, oid.T_bytea: + return math.MaxInt64, true + case oid.T_varchar, oid.T_bpchar: + return int64(fd.Mod - headerSize), true + default: + return 0, false + } +} + +func (fd fieldDesc) PrecisionScale() (precision, scale int64, ok bool) { + switch fd.OID { + case oid.T_numeric, oid.T__numeric: + mod := fd.Mod - headerSize + precision = int64((mod >> 16) & 0xffff) + scale = int64(mod & 0xffff) + return precision, scale, true + default: + return 0, 0, false + } +} + +// ColumnTypeScanType returns the value type that can be used to scan types into. +func (rs *rows) ColumnTypeScanType(index int) reflect.Type { + return rs.colTyps[index].Type() +} + +// ColumnTypeDatabaseTypeName return the database system type name. +func (rs *rows) ColumnTypeDatabaseTypeName(index int) string { + return rs.colTyps[index].Name() +} + +// ColumnTypeLength returns the length of the column type if the column is a +// variable length type. If the column is not a variable length type ok +// should return false. +func (rs *rows) ColumnTypeLength(index int) (length int64, ok bool) { + return rs.colTyps[index].Length() +} + +// ColumnTypePrecisionScale should return the precision and scale for decimal +// types. If not applicable, ok should be false. +func (rs *rows) ColumnTypePrecisionScale(index int) (precision, scale int64, ok bool) { + return rs.colTyps[index].PrecisionScale() +} diff --git a/vendor/github.com/lib/pq/scram/scram.go b/vendor/github.com/lib/pq/scram/scram.go new file mode 100644 index 0000000000..477216b600 --- /dev/null +++ b/vendor/github.com/lib/pq/scram/scram.go @@ -0,0 +1,264 @@ +// Copyright (c) 2014 - Gustavo Niemeyer +// +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are met: +// +// 1. Redistributions of source code must retain the above copyright notice, this +// list of conditions and the following disclaimer. +// 2. Redistributions in binary form must reproduce the above copyright notice, +// this list of conditions and the following disclaimer in the documentation +// and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package scram implements a SCRAM-{SHA-1,etc} client per RFC5802. +// +// http://tools.ietf.org/html/rfc5802 +// +package scram + +import ( + "bytes" + "crypto/hmac" + "crypto/rand" + "encoding/base64" + "fmt" + "hash" + "strconv" + "strings" +) + +// Client implements a SCRAM-* client (SCRAM-SHA-1, SCRAM-SHA-256, etc). +// +// A Client may be used within a SASL conversation with logic resembling: +// +// var in []byte +// var client = scram.NewClient(sha1.New, user, pass) +// for client.Step(in) { +// out := client.Out() +// // send out to server +// in := serverOut +// } +// if client.Err() != nil { +// // auth failed +// } +// +type Client struct { + newHash func() hash.Hash + + user string + pass string + step int + out bytes.Buffer + err error + + clientNonce []byte + serverNonce []byte + saltedPass []byte + authMsg bytes.Buffer +} + +// NewClient returns a new SCRAM-* client with the provided hash algorithm. +// +// For SCRAM-SHA-256, for example, use: +// +// client := scram.NewClient(sha256.New, user, pass) +// +func NewClient(newHash func() hash.Hash, user, pass string) *Client { + c := &Client{ + newHash: newHash, + user: user, + pass: pass, + } + c.out.Grow(256) + c.authMsg.Grow(256) + return c +} + +// Out returns the data to be sent to the server in the current step. +func (c *Client) Out() []byte { + if c.out.Len() == 0 { + return nil + } + return c.out.Bytes() +} + +// Err returns the error that occurred, or nil if there were no errors. +func (c *Client) Err() error { + return c.err +} + +// SetNonce sets the client nonce to the provided value. +// If not set, the nonce is generated automatically out of crypto/rand on the first step. +func (c *Client) SetNonce(nonce []byte) { + c.clientNonce = nonce +} + +var escaper = strings.NewReplacer("=", "=3D", ",", "=2C") + +// Step processes the incoming data from the server and makes the +// next round of data for the server available via Client.Out. +// Step returns false if there are no errors and more data is +// still expected. 
+func (c *Client) Step(in []byte) bool { + c.out.Reset() + if c.step > 2 || c.err != nil { + return false + } + c.step++ + switch c.step { + case 1: + c.err = c.step1(in) + case 2: + c.err = c.step2(in) + case 3: + c.err = c.step3(in) + } + return c.step > 2 || c.err != nil +} + +func (c *Client) step1(in []byte) error { + if len(c.clientNonce) == 0 { + const nonceLen = 16 + buf := make([]byte, nonceLen+b64.EncodedLen(nonceLen)) + if _, err := rand.Read(buf[:nonceLen]); err != nil { + return fmt.Errorf("cannot read random SCRAM-SHA-256 nonce from operating system: %v", err) + } + c.clientNonce = buf[nonceLen:] + b64.Encode(c.clientNonce, buf[:nonceLen]) + } + c.authMsg.WriteString("n=") + escaper.WriteString(&c.authMsg, c.user) + c.authMsg.WriteString(",r=") + c.authMsg.Write(c.clientNonce) + + c.out.WriteString("n,,") + c.out.Write(c.authMsg.Bytes()) + return nil +} + +var b64 = base64.StdEncoding + +func (c *Client) step2(in []byte) error { + c.authMsg.WriteByte(',') + c.authMsg.Write(in) + + fields := bytes.Split(in, []byte(",")) + if len(fields) != 3 { + return fmt.Errorf("expected 3 fields in first SCRAM-SHA-256 server message, got %d: %q", len(fields), in) + } + if !bytes.HasPrefix(fields[0], []byte("r=")) || len(fields[0]) < 2 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 nonce: %q", fields[0]) + } + if !bytes.HasPrefix(fields[1], []byte("s=")) || len(fields[1]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 salt: %q", fields[1]) + } + if !bytes.HasPrefix(fields[2], []byte("i=")) || len(fields[2]) < 6 { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + + c.serverNonce = fields[0][2:] + if !bytes.HasPrefix(c.serverNonce, c.clientNonce) { + return fmt.Errorf("server SCRAM-SHA-256 nonce is not prefixed by client nonce: got %q, want %q+\"...\"", c.serverNonce, c.clientNonce) + } + + salt := make([]byte, b64.DecodedLen(len(fields[1][2:]))) + n, err := b64.Decode(salt, fields[1][2:]) + if err != nil { + return fmt.Errorf("cannot decode SCRAM-SHA-256 salt sent by server: %q", fields[1]) + } + salt = salt[:n] + iterCount, err := strconv.Atoi(string(fields[2][2:])) + if err != nil { + return fmt.Errorf("server sent an invalid SCRAM-SHA-256 iteration count: %q", fields[2]) + } + c.saltPassword(salt, iterCount) + + c.authMsg.WriteString(",c=biws,r=") + c.authMsg.Write(c.serverNonce) + + c.out.WriteString("c=biws,r=") + c.out.Write(c.serverNonce) + c.out.WriteString(",p=") + c.out.Write(c.clientProof()) + return nil +} + +func (c *Client) step3(in []byte) error { + var isv, ise bool + var fields = bytes.Split(in, []byte(",")) + if len(fields) == 1 { + isv = bytes.HasPrefix(fields[0], []byte("v=")) + ise = bytes.HasPrefix(fields[0], []byte("e=")) + } + if ise { + return fmt.Errorf("SCRAM-SHA-256 authentication error: %s", fields[0][2:]) + } else if !isv { + return fmt.Errorf("unsupported SCRAM-SHA-256 final message from server: %q", in) + } + if !bytes.Equal(c.serverSignature(), fields[0][2:]) { + return fmt.Errorf("cannot authenticate SCRAM-SHA-256 server signature: %q", fields[0][2:]) + } + return nil +} + +func (c *Client) saltPassword(salt []byte, iterCount int) { + mac := hmac.New(c.newHash, []byte(c.pass)) + mac.Write(salt) + mac.Write([]byte{0, 0, 0, 1}) + ui := mac.Sum(nil) + hi := make([]byte, len(ui)) + copy(hi, ui) + for i := 1; i < iterCount; i++ { + mac.Reset() + mac.Write(ui) + mac.Sum(ui[:0]) + for j, b := range ui { + hi[j] ^= b + } + } + c.saltedPass = hi +} + +func (c *Client) clientProof() []byte { + 
mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + hash := c.newHash() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + mac = hmac.New(c.newHash, storedKey) + mac.Write(c.authMsg.Bytes()) + clientProof := mac.Sum(nil) + for i, b := range clientKey { + clientProof[i] ^= b + } + clientProof64 := make([]byte, b64.EncodedLen(len(clientProof))) + b64.Encode(clientProof64, clientProof) + return clientProof64 +} + +func (c *Client) serverSignature() []byte { + mac := hmac.New(c.newHash, c.saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.newHash, serverKey) + mac.Write(c.authMsg.Bytes()) + serverSignature := mac.Sum(nil) + + encoded := make([]byte, b64.EncodedLen(len(serverSignature))) + b64.Encode(encoded, serverSignature) + return encoded +} diff --git a/vendor/github.com/lib/pq/ssl.go b/vendor/github.com/lib/pq/ssl.go new file mode 100644 index 0000000000..e5eb928954 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl.go @@ -0,0 +1,193 @@ +package pq + +import ( + "crypto/tls" + "crypto/x509" + "io/ioutil" + "net" + "os" + "os/user" + "path/filepath" +) + +// ssl generates a function to upgrade a net.Conn based on the "sslmode" and +// related settings. The function is nil when no upgrade should take place. +func ssl(o values) (func(net.Conn) (net.Conn, error), error) { + verifyCaOnly := false + tlsConf := tls.Config{} + switch mode := o["sslmode"]; mode { + // "require" is the default. + case "", "require": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + + // From http://www.postgresql.org/docs/current/static/libpq-ssl.html: + // + // Note: For backwards compatibility with earlier versions of + // PostgreSQL, if a root CA file exists, the behavior of + // sslmode=require will be the same as that of verify-ca, meaning the + // server certificate is validated against the CA. Relying on this + // behavior is discouraged, and applications that need certificate + // validation should always use verify-ca or verify-full. + if sslrootcert, ok := o["sslrootcert"]; ok { + if _, err := os.Stat(sslrootcert); err == nil { + verifyCaOnly = true + } else { + delete(o, "sslrootcert") + } + } + case "verify-ca": + // We must skip TLS's own verification since it requires full + // verification since Go 1.3. + tlsConf.InsecureSkipVerify = true + verifyCaOnly = true + case "verify-full": + tlsConf.ServerName = o["host"] + case "disable": + return nil, nil + default: + return nil, fmterrorf(`unsupported sslmode %q; only "require" (default), "verify-full", "verify-ca", and "disable" supported`, mode) + } + + err := sslClientCertificates(&tlsConf, o) + if err != nil { + return nil, err + } + err = sslCertificateAuthority(&tlsConf, o) + if err != nil { + return nil, err + } + + // Accept renegotiation requests initiated by the backend. + // + // Renegotiation was deprecated then removed from PostgreSQL 9.5, but + // the default configuration of older versions has it enabled. Redshift + // also initiates renegotiations and cannot be reconfigured. 
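Taken together, Step drives a three-message SCRAM-SHA-256 exchange: step1 emits the client-first message, step2 consumes the server's nonce/salt/iteration count and emits the client proof, and step3 verifies the server signature. A minimal sketch of how a caller might drive it, assuming the package's NewClient constructor and Out/Err accessors (declared earlier in scram.go, not shown in this hunk); send and recv are hypothetical placeholders for the PostgreSQL SASL wire framing:

```go
import (
	"crypto/sha256"

	"github.com/lib/pq/scram" // vendored in this repo under github.com/lib/pq/scram
)

// authSCRAM is a sketch only: send and recv stand in for the
// SASLInitialResponse/SASLResponse message framing and are supplied
// by the caller; they are not part of this package.
func authSCRAM(user, pass string, send func([]byte), recv func() []byte) error {
	sc := scram.NewClient(sha256.New, user, pass)
	sc.Step(nil)    // produces the client-first message in sc.Out()
	send(sc.Out())
	sc.Step(recv()) // server-first in; client-final message (with proof) out
	send(sc.Out())
	sc.Step(recv()) // server-final in; checks the server signature
	return sc.Err()
}
```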
+ tlsConf.Renegotiation = tls.RenegotiateFreelyAsClient + + return func(conn net.Conn) (net.Conn, error) { + client := tls.Client(conn, &tlsConf) + if verifyCaOnly { + err := sslVerifyCertificateAuthority(client, &tlsConf) + if err != nil { + return nil, err + } + } + return client, nil + }, nil +} + +// sslClientCertificates adds the certificate specified in the "sslcert" and +// "sslkey" settings, or if they aren't set, from the .postgresql directory +// in the user's home directory. The configured files must exist and have +// the correct permissions. +func sslClientCertificates(tlsConf *tls.Config, o values) error { + sslinline := o["sslinline"] + if sslinline == "true" { + cert, err := tls.X509KeyPair([]byte(o["sslcert"]), []byte(o["sslkey"])) + if err != nil { + return err + } + tlsConf.Certificates = []tls.Certificate{cert} + return nil + } + + // user.Current() might fail when cross-compiling. We have to ignore the + // error and continue without home directory defaults, since we wouldn't + // know from where to load them. + user, _ := user.Current() + + // In libpq, the client certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1036-L1037 + sslcert := o["sslcert"] + if len(sslcert) == 0 && user != nil { + sslcert = filepath.Join(user.HomeDir, ".postgresql", "postgresql.crt") + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1045 + if len(sslcert) == 0 { + return nil + } + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1050:L1054 + if _, err := os.Stat(sslcert); os.IsNotExist(err) { + return nil + } else if err != nil { + return err + } + + // In libpq, the ssl key is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L1123-L1222 + sslkey := o["sslkey"] + if len(sslkey) == 0 && user != nil { + sslkey = filepath.Join(user.HomeDir, ".postgresql", "postgresql.key") + } + + if len(sslkey) > 0 { + if err := sslKeyPermissions(sslkey); err != nil { + return err + } + } + + cert, err := tls.LoadX509KeyPair(sslcert, sslkey) + if err != nil { + return err + } + + tlsConf.Certificates = []tls.Certificate{cert} + return nil +} + +// sslCertificateAuthority adds the RootCA specified in the "sslrootcert" setting. +func sslCertificateAuthority(tlsConf *tls.Config, o values) error { + // In libpq, the root certificate is only loaded if the setting is not blank. + // + // https://github.com/postgres/postgres/blob/REL9_6_2/src/interfaces/libpq/fe-secure-openssl.c#L950-L951 + if sslrootcert := o["sslrootcert"]; len(sslrootcert) > 0 { + tlsConf.RootCAs = x509.NewCertPool() + + sslinline := o["sslinline"] + + var cert []byte + if sslinline == "true" { + cert = []byte(sslrootcert) + } else { + var err error + cert, err = ioutil.ReadFile(sslrootcert) + if err != nil { + return err + } + } + + if !tlsConf.RootCAs.AppendCertsFromPEM(cert) { + return fmterrorf("couldn't parse pem in sslrootcert") + } + } + + return nil +} + +// sslVerifyCertificateAuthority carries out a TLS handshake to the server and +// verifies the presented certificate against the CA, i.e. the one specified in +// sslrootcert or the system CA if sslrootcert was not specified. 
+func sslVerifyCertificateAuthority(client *tls.Conn, tlsConf *tls.Config) error { + err := client.Handshake() + if err != nil { + return err + } + certs := client.ConnectionState().PeerCertificates + opts := x509.VerifyOptions{ + DNSName: client.ConnectionState().ServerName, + Intermediates: x509.NewCertPool(), + Roots: tlsConf.RootCAs, + } + for i, cert := range certs { + if i == 0 { + continue + } + opts.Intermediates.AddCert(cert) + } + _, err = certs[0].Verify(opts) + return err +} diff --git a/vendor/github.com/lib/pq/ssl_permissions.go b/vendor/github.com/lib/pq/ssl_permissions.go new file mode 100644 index 0000000000..014af6a169 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_permissions.go @@ -0,0 +1,21 @@ +//go:build !windows +// +build !windows + +package pq + +import "os" + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(sslkey string) error { + info, err := os.Stat(sslkey) + if err != nil { + return err + } + if info.Mode().Perm()&0077 != 0 { + return ErrSSLKeyHasWorldPermissions + } + return nil +} diff --git a/vendor/github.com/lib/pq/ssl_windows.go b/vendor/github.com/lib/pq/ssl_windows.go new file mode 100644 index 0000000000..73663c8f15 --- /dev/null +++ b/vendor/github.com/lib/pq/ssl_windows.go @@ -0,0 +1,10 @@ +//go:build windows +// +build windows + +package pq + +// sslKeyPermissions checks the permissions on user-supplied ssl key files. +// The key file should have very little access. +// +// libpq does not check key file permissions on Windows. +func sslKeyPermissions(string) error { return nil } diff --git a/vendor/github.com/lib/pq/url.go b/vendor/github.com/lib/pq/url.go new file mode 100644 index 0000000000..aec6e95be8 --- /dev/null +++ b/vendor/github.com/lib/pq/url.go @@ -0,0 +1,76 @@ +package pq + +import ( + "fmt" + "net" + nurl "net/url" + "sort" + "strings" +) + +// ParseURL no longer needs to be used by clients of this library since supplying a URL as a +// connection string to sql.Open() is now supported: +// +// sql.Open("postgres", "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full") +// +// It remains exported here for backwards-compatibility. +// +// ParseURL converts a url to a connection string for driver.Open. 
+// Example: +// +// "postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full" +// +// converts to: +// +// "user=bob password=secret host=1.2.3.4 port=5432 dbname=mydb sslmode=verify-full" +// +// A minimal example: +// +// "postgres://" +// +// This will be blank, causing driver.Open to use all of the defaults +func ParseURL(url string) (string, error) { + u, err := nurl.Parse(url) + if err != nil { + return "", err + } + + if u.Scheme != "postgres" && u.Scheme != "postgresql" { + return "", fmt.Errorf("invalid connection protocol: %s", u.Scheme) + } + + var kvs []string + escaper := strings.NewReplacer(`'`, `\'`, `\`, `\\`) + accrue := func(k, v string) { + if v != "" { + kvs = append(kvs, k+"='"+escaper.Replace(v)+"'") + } + } + + if u.User != nil { + v := u.User.Username() + accrue("user", v) + + v, _ = u.User.Password() + accrue("password", v) + } + + if host, port, err := net.SplitHostPort(u.Host); err != nil { + accrue("host", u.Host) + } else { + accrue("host", host) + accrue("port", port) + } + + if u.Path != "" { + accrue("dbname", u.Path[1:]) + } + + q := u.Query() + for k := range q { + accrue(k, q.Get(k)) + } + + sort.Strings(kvs) // Makes testing easier (not a performance concern) + return strings.Join(kvs, " "), nil +} diff --git a/vendor/github.com/lib/pq/user_other.go b/vendor/github.com/lib/pq/user_other.go new file mode 100644 index 0000000000..3dae8f5572 --- /dev/null +++ b/vendor/github.com/lib/pq/user_other.go @@ -0,0 +1,10 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +//go:build js || android || hurd || zos +// +build js android hurd zos + +package pq + +func userCurrent() (string, error) { + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_posix.go b/vendor/github.com/lib/pq/user_posix.go new file mode 100644 index 0000000000..5f2d439bc4 --- /dev/null +++ b/vendor/github.com/lib/pq/user_posix.go @@ -0,0 +1,25 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. + +//go:build aix || darwin || dragonfly || freebsd || (linux && !android) || nacl || netbsd || openbsd || plan9 || solaris || rumprun || illumos +// +build aix darwin dragonfly freebsd linux,!android nacl netbsd openbsd plan9 solaris rumprun illumos + +package pq + +import ( + "os" + "os/user" +) + +func userCurrent() (string, error) { + u, err := user.Current() + if err == nil { + return u.Username, nil + } + + name := os.Getenv("USER") + if name != "" { + return name, nil + } + + return "", ErrCouldNotDetectUsername +} diff --git a/vendor/github.com/lib/pq/user_windows.go b/vendor/github.com/lib/pq/user_windows.go new file mode 100644 index 0000000000..2b691267b9 --- /dev/null +++ b/vendor/github.com/lib/pq/user_windows.go @@ -0,0 +1,27 @@ +// Package pq is a pure Go Postgres driver for the database/sql package. +package pq + +import ( + "path/filepath" + "syscall" +) + +// Perform Windows user name lookup identically to libpq. +// +// The PostgreSQL code makes use of the legacy Win32 function +// GetUserName, and that function has not been imported into stock Go. +// GetUserNameEx is available though, the difference being that a +// wider range of names are available. To get the output to be the +// same as GetUserName, only the base (or last) component of the +// result is returned. 
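For illustration, a small program exercising the settings handled above: ParseURL converts a URL-style DSN into the key=value form (keys come out sorted and single-quoted, per the accrue/sort logic), and TLS settings can be passed directly. The host and certificate path below are hypothetical, and the import path is upstream's (vendored here under github.com):

```go
package main

import (
	"database/sql"
	"fmt"
	"log"

	"github.com/lib/pq" // importing pq registers the "postgres" driver
)

func main() {
	dsn, err := pq.ParseURL("postgres://bob:secret@1.2.3.4:5432/mydb?sslmode=verify-full")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(dsn)
	// dbname='mydb' host='1.2.3.4' password='secret' port='5432' sslmode='verify-full' user='bob'

	// TLS settings can also be given in key=value form; this path is hypothetical.
	db, err := sql.Open("postgres",
		"host=db.example.internal dbname=mydb user=bob sslmode=verify-ca sslrootcert=/etc/pg/root.crt")
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
}
```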
+func userCurrent() (string, error) { + pw_name := make([]uint16, 128) + pwname_size := uint32(len(pw_name)) - 1 + err := syscall.GetUserNameEx(syscall.NameSamCompatible, &pw_name[0], &pwname_size) + if err != nil { + return "", ErrCouldNotDetectUsername + } + s := syscall.UTF16ToString(pw_name) + u := filepath.Base(s) + return u, nil +} diff --git a/vendor/github.com/lib/pq/uuid.go b/vendor/github.com/lib/pq/uuid.go new file mode 100644 index 0000000000..9a1b9e0748 --- /dev/null +++ b/vendor/github.com/lib/pq/uuid.go @@ -0,0 +1,23 @@ +package pq + +import ( + "encoding/hex" + "fmt" +) + +// decodeUUIDBinary interprets the binary format of a uuid, returning it in text format. +func decodeUUIDBinary(src []byte) ([]byte, error) { + if len(src) != 16 { + return nil, fmt.Errorf("pq: unable to decode uuid; bad length: %d", len(src)) + } + + dst := make([]byte, 36) + dst[8], dst[13], dst[18], dst[23] = '-', '-', '-', '-' + hex.Encode(dst[0:], src[0:4]) + hex.Encode(dst[9:], src[4:6]) + hex.Encode(dst[14:], src[6:8]) + hex.Encode(dst[19:], src[8:10]) + hex.Encode(dst[24:], src[10:16]) + + return dst, nil +} diff --git a/vendor/github.com/mitchellh/copystructure/LICENSE b/vendor/github.com/mitchellh/copystructure/LICENSE new file mode 100644 index 0000000000..2298515904 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/copystructure/README.md b/vendor/github.com/mitchellh/copystructure/README.md new file mode 100644 index 0000000000..f0fbd2e5c9 --- /dev/null +++ b/vendor/github.com/mitchellh/copystructure/README.md @@ -0,0 +1,21 @@ +# copystructure + +copystructure is a Go library for deep copying values in Go. + +This allows you to copy Go values that may contain reference values +such as maps, slices, or pointers, and copy their data as well instead +of just their references. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/copystructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/copystructure). + +The `Copy` function has examples associated with it there. 
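A short, hedged sketch of the API these copystructure files add, based on the Copy entry point, the `copy` struct tags, and the Config.Lock mode documented below in copystructure.go; everything beyond those names is illustrative:

```go
package main

import (
	"fmt"
	"log"
	"sync"

	"github.com/mitchellh/copystructure"
)

type Node struct {
	sync.Mutex
	Name     string
	Children []*Node
	Cache    map[string]int `copy:"ignore"` // stays zero-valued in copies
}

func main() {
	orig := &Node{Name: "root", Children: []*Node{{Name: "leaf"}}}

	// Plain deep copy.
	dup, err := copystructure.Copy(orig)
	if err != nil {
		log.Fatal(err)
	}
	copied := dup.(*Node)
	copied.Children[0].Name = "changed"
	fmt.Println(orig.Children[0].Name) // still "leaf": the slice was deep copied

	// Lock-aware copy: the walker locks each sync.Locker it encounters (the
	// embedded Mutex makes *Node a Locker), so concurrent writers can't race
	// the copy. This mode requires a pointer argument.
	if _, err := (copystructure.Config{Lock: true}).Copy(orig); err != nil {
		log.Fatal(err)
	}
}
```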
diff --git a/vendor/github.com/mitchellh/copystructure/copier_time.go b/vendor/github.com/mitchellh/copystructure/copier_time.go
new file mode 100644
index 0000000000..db6a6aa1a1
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copier_time.go
@@ -0,0 +1,15 @@
+package copystructure
+
+import (
+	"reflect"
+	"time"
+)
+
+func init() {
+	Copiers[reflect.TypeOf(time.Time{})] = timeCopier
+}
+
+func timeCopier(v interface{}) (interface{}, error) {
+	// Just... copy it.
+	return v.(time.Time), nil
+}
diff --git a/vendor/github.com/mitchellh/copystructure/copystructure.go b/vendor/github.com/mitchellh/copystructure/copystructure.go
new file mode 100644
index 0000000000..8089e6670a
--- /dev/null
+++ b/vendor/github.com/mitchellh/copystructure/copystructure.go
@@ -0,0 +1,631 @@
+package copystructure
+
+import (
+	"errors"
+	"reflect"
+	"sync"
+
+	"github.com/mitchellh/reflectwalk"
+)
+
+const tagKey = "copy"
+
+// Copy returns a deep copy of v.
+//
+// Copy is unable to copy unexported fields in a struct (lowercase field names).
+// Unexported fields can't be reflected by the Go runtime and therefore
+// copystructure can't perform any data copies.
+//
+// For structs, copy behavior can be controlled with struct tags. For example:
+//
+//   struct {
+//     Name string
+//     Data *bytes.Buffer `copy:"shallow"`
+//   }
+//
+// The available tag values are:
+//
+//   * "ignore" - The field will be ignored, effectively resulting in it being
+//     assigned the zero value in the copy.
+//
+//   * "shallow" - The field will be shallow copied. This means that reference
+//     values such as pointers, maps, slices, etc. will be directly assigned
+//     versus deep copied.
+//
+func Copy(v interface{}) (interface{}, error) {
+	return Config{}.Copy(v)
+}
+
+// CopierFunc is a function that knows how to deep copy a specific type.
+// Register these globally with the Copiers variable.
+type CopierFunc func(interface{}) (interface{}, error)
+
+// Copiers is a map of types that behave specially when they are copied.
+// If a type is found in this map while deep copying, this function
+// will be called to copy it instead of attempting to copy all fields.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var Copiers map[reflect.Type]CopierFunc = make(map[reflect.Type]CopierFunc)
+
+// ShallowCopiers is a map of pointer types that behave specially
+// when they are copied. If a type is found in this map while deep
+// copying, the pointer value will be shallow copied and not walked
+// into.
+//
+// The key should be the type, obtained using: reflect.TypeOf(value
+// with type).
+//
+// It is unsafe to write to this map after Copies have started. If you
+// are writing to this map while also copying, wrap all modifications to
+// this map as well as to Copy in a mutex.
+var ShallowCopiers map[reflect.Type]struct{} = make(map[reflect.Type]struct{})
+
+// Must is a helper that wraps a call to a function returning
+// (interface{}, error) and panics if the error is non-nil. It is intended
+// for use in variable initializations and should only be used when a copy
+// error should be a crashing case.
+func Must(v interface{}, err error) interface{} {
+	if err != nil {
+		panic("copy error: " + err.Error())
+	}
+
+	return v
+}
+
+var errPointerRequired = errors.New("Copy argument must be a pointer when Lock is true")
+
+type Config struct {
+	// Lock any types that are a sync.Locker and are not a mutex while copying.
+	// If there is an RLocker method, use that to get the sync.Locker.
+	Lock bool
+
+	// Copiers is a map of types associated with a CopierFunc. Use the global
+	// Copiers map if this is nil.
+	Copiers map[reflect.Type]CopierFunc
+
+	// ShallowCopiers is a map of pointer types that are shallow copied
+	// no matter where they are encountered. Use the global ShallowCopiers
+	// if this is nil.
+	ShallowCopiers map[reflect.Type]struct{}
+}
+
+func (c Config) Copy(v interface{}) (interface{}, error) {
+	if c.Lock && reflect.ValueOf(v).Kind() != reflect.Ptr {
+		return nil, errPointerRequired
+	}
+
+	w := new(walker)
+	if c.Lock {
+		w.useLocks = true
+	}
+
+	if c.Copiers == nil {
+		c.Copiers = Copiers
+	}
+	w.copiers = c.Copiers
+
+	if c.ShallowCopiers == nil {
+		c.ShallowCopiers = ShallowCopiers
+	}
+	w.shallowCopiers = c.ShallowCopiers
+
+	err := reflectwalk.Walk(v, w)
+	if err != nil {
+		return nil, err
+	}
+
+	// Get the result. If the result is nil, then we want to turn it
+	// into a typed nil if we can.
+	result := w.Result
+	if result == nil {
+		val := reflect.ValueOf(v)
+		result = reflect.Indirect(reflect.New(val.Type())).Interface()
+	}
+
+	return result, nil
+}
+
+// Return the key used to index interface types we've seen. Store the number
+// of pointers in the upper 32bits, and the depth in the lower 32bits. This is
+// easy to calculate, easy to match a key with our current depth, and we don't
+// need to deal with initializing and cleaning up nested maps or slices.
+func ifaceKey(pointers, depth int) uint64 {
+	return uint64(pointers)<<32 | uint64(depth)
+}
+
+type walker struct {
+	Result interface{}
+
+	copiers        map[reflect.Type]CopierFunc
+	shallowCopiers map[reflect.Type]struct{}
+	depth          int
+	ignoreDepth    int
+	vals           []reflect.Value
+	cs             []reflect.Value
+
+	// This stores the number of pointers we've walked over, indexed by depth.
+	ps []int
+
+	// If an interface is indirected by a pointer, we need to know the type of
+	// interface to create when creating the new value. Store the interface
+	// types here, indexed by both the walk depth and the number of pointers
+	// already seen at that depth. Use ifaceKey to calculate the proper uint64
+	// value.
+	ifaceTypes map[uint64]reflect.Type
+
+	// any locks we've taken, indexed by depth
+	locks []sync.Locker
+	// take locks while walking the structure
+	useLocks bool
+}
+
+func (w *walker) Enter(l reflectwalk.Location) error {
+	w.depth++
+
+	// ensure we have enough elements to index via w.depth
+	for w.depth >= len(w.locks) {
+		w.locks = append(w.locks, nil)
+	}
+
+	for len(w.ps) < w.depth+1 {
+		w.ps = append(w.ps, 0)
+	}
+
+	return nil
+}
+
+func (w *walker) Exit(l reflectwalk.Location) error {
+	locker := w.locks[w.depth]
+	w.locks[w.depth] = nil
+	if locker != nil {
+		defer locker.Unlock()
+	}
+
+	// clear out pointers and interfaces as we exit the stack
+	w.ps[w.depth] = 0
+
+	for k := range w.ifaceTypes {
+		mask := uint64(^uint32(0))
+		if k&mask == uint64(w.depth) {
+			delete(w.ifaceTypes, k)
+		}
+	}
+
+	w.depth--
+	if w.ignoreDepth > w.depth {
+		w.ignoreDepth = 0
+	}
+
+	if w.ignoring() {
+		return nil
+	}
+
+	switch l {
+	case reflectwalk.Array:
+		fallthrough
+	case reflectwalk.Map:
+		fallthrough
+	case reflectwalk.Slice:
+		w.replacePointerMaybe()
+
+		// Pop map off our container
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.MapValue:
+		// Pop off the key and value
+		mv := w.valPop()
+		mk := w.valPop()
+		m := w.cs[len(w.cs)-1]
+
+		// If mv is the zero value, SetMapIndex deletes the key from the map,
+		// or in this case never adds it. We need to create a properly typed
+		// zero value so that this key can be set.
+		if !mv.IsValid() {
+			mv = reflect.Zero(m.Elem().Type().Elem())
+		}
+		m.Elem().SetMapIndex(mk, mv)
+	case reflectwalk.ArrayElem:
+		// Pop off the value and the index and set it on the array
+		v := w.valPop()
+		i := w.valPop().Interface().(int)
+		if v.IsValid() {
+			a := w.cs[len(w.cs)-1]
+			ae := a.Elem().Index(i) // storing array as pointer on stack - so need Elem() call
+			if ae.CanSet() {
+				ae.Set(v)
+			}
+		}
+	case reflectwalk.SliceElem:
+		// Pop off the value and the index and set it on the slice
+		v := w.valPop()
+		i := w.valPop().Interface().(int)
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			se := s.Elem().Index(i)
+			if se.CanSet() {
+				se.Set(v)
+			}
+		}
+	case reflectwalk.Struct:
+		w.replacePointerMaybe()
+
+		// Remove the struct from the container stack
+		w.cs = w.cs[:len(w.cs)-1]
+	case reflectwalk.StructField:
+		// Pop off the value and the field
+		v := w.valPop()
+		f := w.valPop().Interface().(reflect.StructField)
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			sf := reflect.Indirect(s).FieldByName(f.Name)
+
+			if sf.CanSet() {
+				sf.Set(v)
+			}
+		}
+	case reflectwalk.WalkLoc:
+		// Clear out the slices for GC
+		w.cs = nil
+		w.vals = nil
+	}
+
+	return nil
+}
+
+func (w *walker) Map(m reflect.Value) error {
+	if w.ignoring() {
+		return nil
+	}
+	w.lock(m)
+
+	// Create the map. If the map itself is nil, then just make a nil map
+	var newMap reflect.Value
+	if m.IsNil() {
+		newMap = reflect.New(m.Type())
+	} else {
+		newMap = wrapPtr(reflect.MakeMap(m.Type()))
+	}
+
+	w.cs = append(w.cs, newMap)
+	w.valPush(newMap)
+	return nil
+}
+
+func (w *walker) MapElem(m, k, v reflect.Value) error {
+	return nil
+}
+
+func (w *walker) PointerEnter(v bool) error {
+	if v {
+		w.ps[w.depth]++
+	}
+	return nil
+}
+
+func (w *walker) PointerExit(v bool) error {
+	if v {
+		w.ps[w.depth]--
+	}
+	return nil
+}
+
+func (w *walker) Pointer(v reflect.Value) error {
+	if _, ok := w.shallowCopiers[v.Type()]; ok {
+		// Shallow copy this value. Use the same logic as primitive, then
+		// return skip.
+ if err := w.Primitive(v); err != nil { + return err + } + + return reflectwalk.SkipEntry + } + + return nil +} + +func (w *walker) Interface(v reflect.Value) error { + if !v.IsValid() { + return nil + } + if w.ifaceTypes == nil { + w.ifaceTypes = make(map[uint64]reflect.Type) + } + + w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)] = v.Type() + return nil +} + +func (w *walker) Primitive(v reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(v) + + // IsValid verifies the v is non-zero and CanInterface verifies + // that we're allowed to read this value (unexported fields). + var newV reflect.Value + if v.IsValid() && v.CanInterface() { + newV = reflect.New(v.Type()) + newV.Elem().Set(v) + } + + w.valPush(newV) + w.replacePointerMaybe() + return nil +} + +func (w *walker) Slice(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var newS reflect.Value + if s.IsNil() { + newS = reflect.New(s.Type()) + } else { + newS = wrapPtr(reflect.MakeSlice(s.Type(), s.Len(), s.Cap())) + } + + w.cs = append(w.cs, newS) + w.valPush(newS) + return nil +} + +func (w *walker) SliceElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the slice here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Array(a reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(a) + + newA := reflect.New(a.Type()) + + w.cs = append(w.cs, newA) + w.valPush(newA) + return nil +} + +func (w *walker) ArrayElem(i int, elem reflect.Value) error { + if w.ignoring() { + return nil + } + + // We don't write the array here because elem might still be + // arbitrarily complex. Just record the index and continue on. + w.valPush(reflect.ValueOf(i)) + + return nil +} + +func (w *walker) Struct(s reflect.Value) error { + if w.ignoring() { + return nil + } + w.lock(s) + + var v reflect.Value + if c, ok := w.copiers[s.Type()]; ok { + // We have a Copier for this struct, so we use that copier to + // get the copy, and we ignore anything deeper than this. + w.ignoreDepth = w.depth + + dup, err := c(s.Interface()) + if err != nil { + return err + } + + // We need to put a pointer to the value on the value stack, + // so allocate a new pointer and set it. + v = reflect.New(s.Type()) + reflect.Indirect(v).Set(reflect.ValueOf(dup)) + } else { + // No copier, we copy ourselves and allow reflectwalk to guide + // us deeper into the structure for copying. + v = reflect.New(s.Type()) + } + + // Push the value onto the value stack for setting the struct field, + // and add the struct itself to the containers stack in case we walk + // deeper so that its own fields can be modified. + w.valPush(v) + w.cs = append(w.cs, v) + + return nil +} + +func (w *walker) StructField(f reflect.StructField, v reflect.Value) error { + if w.ignoring() { + return nil + } + + // If PkgPath is non-empty, this is a private (unexported) field. + // We do not set this unexported since the Go runtime doesn't allow us. + if f.PkgPath != "" { + return reflectwalk.SkipEntry + } + + switch f.Tag.Get(tagKey) { + case "shallow": + // If we're shallow copying then assign the value directly to the + // struct and skip the entry. 
+		if v.IsValid() {
+			s := w.cs[len(w.cs)-1]
+			sf := reflect.Indirect(s).FieldByName(f.Name)
+			if sf.CanSet() {
+				sf.Set(v)
+			}
+		}
+
+		return reflectwalk.SkipEntry
+
+	case "ignore":
+		// Do nothing
+		return reflectwalk.SkipEntry
+	}
+
+	// Push the field onto the stack, we'll handle it when we exit
+	// the struct field in Exit...
+	w.valPush(reflect.ValueOf(f))
+
+	return nil
+}
+
+// ignore causes the walker to ignore any more values until we exit this one
+func (w *walker) ignore() {
+	w.ignoreDepth = w.depth
+}
+
+func (w *walker) ignoring() bool {
+	return w.ignoreDepth > 0 && w.depth >= w.ignoreDepth
+}
+
+func (w *walker) pointerPeek() bool {
+	return w.ps[w.depth] > 0
+}
+
+func (w *walker) valPop() reflect.Value {
+	result := w.vals[len(w.vals)-1]
+	w.vals = w.vals[:len(w.vals)-1]
+
+	// If we're out of values, that means we popped everything off. In
+	// this case, we reset the result so the next pushed value becomes
+	// the result.
+	if len(w.vals) == 0 {
+		w.Result = nil
+	}
+
+	return result
+}
+
+func (w *walker) valPush(v reflect.Value) {
+	w.vals = append(w.vals, v)
+
+	// If we haven't set the result yet, then this is the result since
+	// it is the first (outermost) value we're seeing.
+	if w.Result == nil && v.IsValid() {
+		w.Result = v.Interface()
+	}
+}
+
+func (w *walker) replacePointerMaybe() {
+	// Determine the last pointer value. If it is NOT a pointer, then
+	// we need to push that onto the stack.
+	if !w.pointerPeek() {
+		w.valPush(reflect.Indirect(w.valPop()))
+		return
+	}
+
+	v := w.valPop()
+
+	// If the expected type is a pointer to an interface of any depth,
+	// such as *interface{}, **interface{}, etc., then we need to convert
+	// the value "v" from *CONCRETE to *interface{} so types match for
+	// Set.
+	//
+	// For example, if v is type *Foo where Foo is a struct, v would become
+	// *interface{} instead. This only happens if we have an interface expectation
+	// at this depth.
+	//
+	// For more info, see GH-16
+	if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth], w.depth)]; ok && iType.Kind() == reflect.Interface {
+		y := reflect.New(iType)           // Create *interface{}
+		y.Elem().Set(reflect.Indirect(v)) // Assign "Foo" to interface{} (dereferenced)
+		v = y                             // v is now typed *interface{} (where *v = Foo)
+	}
+
+	for i := 1; i < w.ps[w.depth]; i++ {
+		if iType, ok := w.ifaceTypes[ifaceKey(w.ps[w.depth]-i, w.depth)]; ok {
+			iface := reflect.New(iType).Elem()
+			iface.Set(v)
+			v = iface
+		}
+
+		p := reflect.New(v.Type())
+		p.Elem().Set(v)
+		v = p
+	}
+
+	w.valPush(v)
+}
+
+// if this value is a Locker, lock it and add it to the locks slice
+func (w *walker) lock(v reflect.Value) {
+	if !w.useLocks {
+		return
+	}
+
+	if !v.IsValid() || !v.CanInterface() {
+		return
+	}
+
+	type rlocker interface {
+		RLocker() sync.Locker
+	}
+
+	var locker sync.Locker
+
+	// We can't call Interface() on a value directly, since that requires
+	// a copy. This is OK, since the pointer to a value which is a sync.Locker
+	// is also a sync.Locker.
+	if v.Kind() == reflect.Ptr {
+		switch l := v.Interface().(type) {
+		case rlocker:
+			// don't lock a mutex directly
+			if _, ok := l.(*sync.RWMutex); !ok {
+				locker = l.RLocker()
+			}
+		case sync.Locker:
+			locker = l
+		}
+	} else if v.CanAddr() {
+		switch l := v.Addr().Interface().(type) {
+		case rlocker:
+			// don't lock a mutex directly
+			if _, ok := l.(*sync.RWMutex); !ok {
+				locker = l.RLocker()
+			}
+		case sync.Locker:
+			locker = l
+		}
+	}
+
+	// still no callable locker
+	if locker == nil {
+		return
+	}
+
+	// don't lock a mutex directly
+	switch locker.(type) {
+	case *sync.Mutex, *sync.RWMutex:
+		return
+	}
+
+	locker.Lock()
+	w.locks[w.depth] = locker
+}
+
+// wrapPtr is a helper that takes v and always makes it *v. copystructure
+// stores things internally as pointers until the last moment before unwrapping.
+func wrapPtr(v reflect.Value) reflect.Value {
+	if !v.IsValid() {
+		return v
+	}
+	vPtr := reflect.New(v.Type())
+	vPtr.Elem().Set(v)
+	return vPtr
+}
diff --git a/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
new file mode 100644
index 0000000000..2298515904
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/LICENSE.md
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/go-wordwrap/README.md b/vendor/github.com/mitchellh/go-wordwrap/README.md
new file mode 100644
index 0000000000..60ae311700
--- /dev/null
+++ b/vendor/github.com/mitchellh/go-wordwrap/README.md
@@ -0,0 +1,39 @@
+# go-wordwrap
+
+`go-wordwrap` (Golang package: `wordwrap`) is a package for Go that
+automatically wraps words into multiple lines. The primary use case for this
+is in formatting CLI output, but of course word wrapping is a generally useful
+thing to do.
+
+## Installation and Usage
+
+Install using `go get github.com/mitchellh/go-wordwrap`.
+
+Full documentation is available at
+http://godoc.org/github.com/mitchellh/go-wordwrap
+
+Below is an example of its usage ignoring errors:
+
+```go
+wrapped := wordwrap.WrapString("foo bar baz", 3)
+fmt.Println(wrapped)
+```
+
+Would output:
+
+```
+foo
+bar
+baz
+```
+
+## Word Wrap Algorithm
+
+This library doesn't use any clever algorithm for word wrapping. The wrapping
+is actually very naive: lines are broken wherever there is whitespace or an
+explicit linebreak.
+The goal of this library is for word wrapping CLI output, so the input is +typically pretty well controlled human language. Because of this, the naive +approach typically works just fine. + +In the future, we'd like to make the algorithm more advanced. We would do +so without breaking the API. diff --git a/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go new file mode 100644 index 0000000000..ac67205bc2 --- /dev/null +++ b/vendor/github.com/mitchellh/go-wordwrap/wordwrap.go @@ -0,0 +1,73 @@ +package wordwrap + +import ( + "bytes" + "unicode" +) + +// WrapString wraps the given string within lim width in characters. +// +// Wrapping is currently naive and only happens at white-space. A future +// version of the library will implement smarter wrapping. This means that +// pathological cases can dramatically reach past the limit, such as a very +// long word. +func WrapString(s string, lim uint) string { + // Initialize a buffer with a slightly larger size to account for breaks + init := make([]byte, 0, len(s)) + buf := bytes.NewBuffer(init) + + var current uint + var wordBuf, spaceBuf bytes.Buffer + + for _, char := range s { + if char == '\n' { + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) > lim { + current = 0 + } else { + current += uint(spaceBuf.Len()) + spaceBuf.WriteTo(buf) + } + spaceBuf.Reset() + } else { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + buf.WriteRune(char) + current = 0 + } else if unicode.IsSpace(char) { + if spaceBuf.Len() == 0 || wordBuf.Len() > 0 { + current += uint(spaceBuf.Len() + wordBuf.Len()) + spaceBuf.WriteTo(buf) + spaceBuf.Reset() + wordBuf.WriteTo(buf) + wordBuf.Reset() + } + + spaceBuf.WriteRune(char) + } else { + + wordBuf.WriteRune(char) + + if current+uint(spaceBuf.Len()+wordBuf.Len()) > lim && uint(wordBuf.Len()) < lim { + buf.WriteRune('\n') + current = 0 + spaceBuf.Reset() + } + } + } + + if wordBuf.Len() == 0 { + if current+uint(spaceBuf.Len()) <= lim { + spaceBuf.WriteTo(buf) + } + } else { + spaceBuf.WriteTo(buf) + wordBuf.WriteTo(buf) + } + + return buf.String() +} diff --git a/vendor/github.com/mitchellh/reflectwalk/.travis.yml b/vendor/github.com/mitchellh/reflectwalk/.travis.yml new file mode 100644 index 0000000000..4f2ee4d973 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/.travis.yml @@ -0,0 +1 @@ +language: go diff --git a/vendor/github.com/mitchellh/reflectwalk/LICENSE b/vendor/github.com/mitchellh/reflectwalk/LICENSE new file mode 100644 index 0000000000..f9c841a51e --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
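As WrapString's doc comment notes, wrapping only happens at whitespace, so a word longer than the limit is emitted intact and overflows the line. A small illustration (the expected output below was derived by tracing the loop above, so treat it as a sketch):

```go
package main

import (
	"fmt"

	"github.com/mitchellh/go-wordwrap" // package name is wordwrap
)

func main() {
	fmt.Println(wordwrap.WrapString("the quick brown fox", 10))
	// the quick
	// brown fox

	// A word longer than the limit is never split; it simply overflows.
	fmt.Println(wordwrap.WrapString("supercalifragilistic word", 5))
	// supercalifragilistic
	// word
}
```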
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/reflectwalk/README.md b/vendor/github.com/mitchellh/reflectwalk/README.md new file mode 100644 index 0000000000..ac82cd2e15 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/README.md @@ -0,0 +1,6 @@ +# reflectwalk + +reflectwalk is a Go library for "walking" a value in Go using reflection, +in the same way a directory tree can be "walked" on the filesystem. Walking +a complex structure can allow you to do manipulations on unknown structures +such as those decoded from JSON. diff --git a/vendor/github.com/mitchellh/reflectwalk/location.go b/vendor/github.com/mitchellh/reflectwalk/location.go new file mode 100644 index 0000000000..6a7f176117 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location.go @@ -0,0 +1,19 @@ +package reflectwalk + +//go:generate stringer -type=Location location.go + +type Location uint + +const ( + None Location = iota + Map + MapKey + MapValue + Slice + SliceElem + Array + ArrayElem + Struct + StructField + WalkLoc +) diff --git a/vendor/github.com/mitchellh/reflectwalk/location_string.go b/vendor/github.com/mitchellh/reflectwalk/location_string.go new file mode 100644 index 0000000000..70760cf4c7 --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/location_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type=Location location.go"; DO NOT EDIT. + +package reflectwalk + +import "fmt" + +const _Location_name = "NoneMapMapKeyMapValueSliceSliceElemArrayArrayElemStructStructFieldWalkLoc" + +var _Location_index = [...]uint8{0, 4, 7, 13, 21, 26, 35, 40, 49, 55, 66, 73} + +func (i Location) String() string { + if i >= Location(len(_Location_index)-1) { + return fmt.Sprintf("Location(%d)", i) + } + return _Location_name[_Location_index[i]:_Location_index[i+1]] +} diff --git a/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go new file mode 100644 index 0000000000..7fee7b050b --- /dev/null +++ b/vendor/github.com/mitchellh/reflectwalk/reflectwalk.go @@ -0,0 +1,420 @@ +// reflectwalk is a package that allows you to "walk" complex structures +// similar to how you may "walk" a filesystem: visiting every element one +// by one and calling callback functions allowing you to handle and manipulate +// those elements. +package reflectwalk + +import ( + "errors" + "reflect" +) + +// PrimitiveWalker implementations are able to handle primitive values +// within complex structures. Primitive values are numbers, strings, +// booleans, funcs, chans. +// +// These primitive values are often members of more complex +// structures (slices, maps, etc.) that are walkable by other interfaces. +type PrimitiveWalker interface { + Primitive(reflect.Value) error +} + +// InterfaceWalker implementations are able to handle interface values as they +// are encountered during the walk. 
+type InterfaceWalker interface { + Interface(reflect.Value) error +} + +// MapWalker implementations are able to handle individual elements +// found within a map structure. +type MapWalker interface { + Map(m reflect.Value) error + MapElem(m, k, v reflect.Value) error +} + +// SliceWalker implementations are able to handle slice elements found +// within complex structures. +type SliceWalker interface { + Slice(reflect.Value) error + SliceElem(int, reflect.Value) error +} + +// ArrayWalker implementations are able to handle array elements found +// within complex structures. +type ArrayWalker interface { + Array(reflect.Value) error + ArrayElem(int, reflect.Value) error +} + +// StructWalker is an interface that has methods that are called for +// structs when a Walk is done. +type StructWalker interface { + Struct(reflect.Value) error + StructField(reflect.StructField, reflect.Value) error +} + +// EnterExitWalker implementations are notified before and after +// they walk deeper into complex structures (into struct fields, +// into slice elements, etc.) +type EnterExitWalker interface { + Enter(Location) error + Exit(Location) error +} + +// PointerWalker implementations are notified when the value they're +// walking is a pointer or not. Pointer is called for _every_ value whether +// it is a pointer or not. +type PointerWalker interface { + PointerEnter(bool) error + PointerExit(bool) error +} + +// PointerValueWalker implementations are notified with the value of +// a particular pointer when a pointer is walked. Pointer is called +// right before PointerEnter. +type PointerValueWalker interface { + Pointer(reflect.Value) error +} + +// SkipEntry can be returned from walk functions to skip walking +// the value of this field. This is only valid in the following functions: +// +// - Struct: skips all fields from being walked +// - StructField: skips walking the struct value +// +var SkipEntry = errors.New("skip this entry") + +// Walk takes an arbitrary value and an interface and traverses the +// value, calling callbacks on the interface if they are supported. +// The interface should implement one or more of the walker interfaces +// in this package, such as PrimitiveWalker, StructWalker, etc. +func Walk(data, walker interface{}) (err error) { + v := reflect.ValueOf(data) + ew, ok := walker.(EnterExitWalker) + if ok { + err = ew.Enter(WalkLoc) + } + + if err == nil { + err = walk(v, walker) + } + + if ok && err == nil { + err = ew.Exit(WalkLoc) + } + + return +} + +func walk(v reflect.Value, w interface{}) (err error) { + // Determine if we're receiving a pointer and if so notify the walker. + // The logic here is convoluted but very important (tests will fail if + // almost any part is changed). I will try to explain here. + // + // First, we check if the value is an interface, if so, we really need + // to check the interface's VALUE to see whether it is a pointer. + // + // Check whether the value is then a pointer. If so, then set pointer + // to true to notify the user. + // + // If we still have a pointer or an interface after the indirections, then + // we unwrap another level + // + // At this time, we also set "v" to be the dereferenced value. This is + // because once we've unwrapped the pointer we want to use that value. 
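These interfaces are opt-in: Walk type-asserts its second argument against each of them and only calls the ones the walker implements. A minimal sketch of implementing one of them, here a PrimitiveWalker that redacts every settable string it visits (the type and field names are illustrative):

```go
package main

import (
	"fmt"
	"reflect"

	"github.com/mitchellh/reflectwalk"
)

// redactor implements PrimitiveWalker: every settable string the walk
// reaches is overwritten, however deeply it is nested. Map keys/values
// obtained via reflection are not settable, so the CanSet guard skips them.
type redactor struct{}

func (redactor) Primitive(v reflect.Value) error {
	if v.Kind() == reflect.String && v.CanSet() {
		v.SetString("REDACTED")
	}
	return nil
}

func main() {
	cfg := struct {
		Name string
		Mode string
	}{"app", "debug"}

	// Pass a pointer so the struct fields are addressable and settable.
	if err := reflectwalk.Walk(&cfg, redactor{}); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Name, cfg.Mode) // REDACTED REDACTED
}
```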
+ pointer := false + pointerV := v + + for { + if pointerV.Kind() == reflect.Interface { + if iw, ok := w.(InterfaceWalker); ok { + if err = iw.Interface(pointerV); err != nil { + return + } + } + + pointerV = pointerV.Elem() + } + + if pointerV.Kind() == reflect.Ptr { + if pw, ok := w.(PointerValueWalker); ok { + if err = pw.Pointer(pointerV); err != nil { + if err == SkipEntry { + // Skip the rest of this entry but clear the error + return nil + } + + return + } + } + + pointer = true + v = reflect.Indirect(pointerV) + } + if pw, ok := w.(PointerWalker); ok { + if err = pw.PointerEnter(pointer); err != nil { + return + } + + defer func(pointer bool) { + if err != nil { + return + } + + err = pw.PointerExit(pointer) + }(pointer) + } + + if pointer { + pointerV = v + } + pointer = false + + // If we still have a pointer or interface we have to indirect another level. + switch pointerV.Kind() { + case reflect.Ptr, reflect.Interface: + continue + } + break + } + + // We preserve the original value here because if it is an interface + // type, we want to pass that directly into the walkPrimitive, so that + // we can set it. + originalV := v + if v.Kind() == reflect.Interface { + v = v.Elem() + } + + k := v.Kind() + if k >= reflect.Int && k <= reflect.Complex128 { + k = reflect.Int + } + + switch k { + // Primitives + case reflect.Bool, reflect.Chan, reflect.Func, reflect.Int, reflect.String, reflect.Invalid: + err = walkPrimitive(originalV, w) + return + case reflect.Map: + err = walkMap(v, w) + return + case reflect.Slice: + err = walkSlice(v, w) + return + case reflect.Struct: + err = walkStruct(v, w) + return + case reflect.Array: + err = walkArray(v, w) + return + default: + panic("unsupported type: " + k.String()) + } +} + +func walkMap(v reflect.Value, w interface{}) error { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Map) + } + + if mw, ok := w.(MapWalker); ok { + if err := mw.Map(v); err != nil { + return err + } + } + + for _, k := range v.MapKeys() { + kv := v.MapIndex(k) + + if mw, ok := w.(MapWalker); ok { + if err := mw.MapElem(v, k, kv); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(MapKey) + } + + if err := walk(k, w); err != nil { + return err + } + + if ok { + ew.Exit(MapKey) + ew.Enter(MapValue) + } + + // get the map value again as it may have changed in the MapElem call + if err := walk(v.MapIndex(k), w); err != nil { + return err + } + + if ok { + ew.Exit(MapValue) + } + } + + if ewok { + ew.Exit(Map) + } + + return nil +} + +func walkPrimitive(v reflect.Value, w interface{}) error { + if pw, ok := w.(PrimitiveWalker); ok { + return pw.Primitive(v) + } + + return nil +} + +func walkSlice(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Slice) + } + + if sw, ok := w.(SliceWalker); ok { + if err := sw.Slice(v); err != nil { + return err + } + } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if sw, ok := w.(SliceWalker); ok { + if err := sw.SliceElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(SliceElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(SliceElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Slice) + } + + return nil +} + +func walkArray(v reflect.Value, w interface{}) (err error) { + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(Array) + } + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.Array(v); err != nil { + return err + } 
+ } + + for i := 0; i < v.Len(); i++ { + elem := v.Index(i) + + if aw, ok := w.(ArrayWalker); ok { + if err := aw.ArrayElem(i, elem); err != nil { + return err + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(ArrayElem) + } + + if err := walk(elem, w); err != nil { + return err + } + + if ok { + ew.Exit(ArrayElem) + } + } + + ew, ok = w.(EnterExitWalker) + if ok { + ew.Exit(Array) + } + + return nil +} + +func walkStruct(v reflect.Value, w interface{}) (err error) { + ew, ewok := w.(EnterExitWalker) + if ewok { + ew.Enter(Struct) + } + + skip := false + if sw, ok := w.(StructWalker); ok { + err = sw.Struct(v) + if err == SkipEntry { + skip = true + err = nil + } + if err != nil { + return + } + } + + if !skip { + vt := v.Type() + for i := 0; i < vt.NumField(); i++ { + sf := vt.Field(i) + f := v.FieldByIndex([]int{i}) + + if sw, ok := w.(StructWalker); ok { + err = sw.StructField(sf, f) + + // SkipEntry just pretends this field doesn't even exist + if err == SkipEntry { + continue + } + + if err != nil { + return + } + } + + ew, ok := w.(EnterExitWalker) + if ok { + ew.Enter(StructField) + } + + err = walk(f, w) + if err != nil { + return + } + + if ok { + ew.Exit(StructField) + } + } + } + + if ewok { + ew.Exit(Struct) + } + + return nil +} diff --git a/vendor/github.com/moby/locker/LICENSE b/vendor/github.com/moby/locker/LICENSE new file mode 100644 index 0000000000..2e0ec1dcf1 --- /dev/null +++ b/vendor/github.com/moby/locker/LICENSE @@ -0,0 +1,190 @@ + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   Copyright 2013-2018 Docker, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       https://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/github.com/moby/locker/README.md b/vendor/github.com/moby/locker/README.md
new file mode 100644
index 0000000000..a0852f0f8e
--- /dev/null
+++ b/vendor/github.com/moby/locker/README.md
@@ -0,0 +1,65 @@
+Locker
+=====
+
+locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation is similar to a sync.Mutex; however, the user must provide a
+reference name that identifies the underlying lock when locking and unlocking,
+and `Unlock` may return an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
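+
+As a minimal sketch (the key name "instance-1" is illustrative), locking and
+unlocking by name looks like this:
+
+```go
+l := locker.New()
+l.Lock("instance-1") // the lock for "instance-1" is created on first use
+// ... work guarded by the "instance-1" lock ...
+if err := l.Unlock("instance-1"); err != nil {
+	// locker.ErrNoSuchLock is returned if no lock with this name exists
+}
+```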
+
+
+## Usage
+
+```go
+package important
+
+import (
+	"sync"
+	"time"
+
+	"github.com/moby/locker"
+)
+
+type important struct {
+	locks *locker.Locker
+	data  map[string]interface{}
+	mu    sync.Mutex
+}
+
+func (i *important) Get(name string) interface{} {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+	return i.data[name]
+}
+
+func (i *important) Create(name string, data interface{}) {
+	i.locks.Lock(name)
+	defer i.locks.Unlock(name)
+
+	i.createImportant(data)
+
+	i.mu.Lock()
+	i.data[name] = data
+	i.mu.Unlock()
+}
+
+func (i *important) createImportant(data interface{}) {
+	time.Sleep(10 * time.Second)
+}
+```
+
+For functions dealing with a given name, always lock at the beginning of the
+function (or before doing anything with the underlying state); this ensures that
+any other function dealing with the same name will block.
+
+When you need to modify the underlying data, take the global lock as well, to
+ensure that nothing else is modifying it at the same time.
+Since the name lock is already held, no reads will occur while the modification
+is being performed.
+
diff --git a/vendor/github.com/moby/locker/locker.go b/vendor/github.com/moby/locker/locker.go
new file mode 100644
index 0000000000..0b22ddfab8
--- /dev/null
+++ b/vendor/github.com/moby/locker/locker.go
@@ -0,0 +1,112 @@
+/*
+Package locker provides a mechanism for creating finer-grained locking to help
+free up more global locks to handle other tasks.
+
+The implementation is similar to a sync.Mutex; however, the user must provide a
+reference name that identifies the underlying lock when locking and unlocking,
+and Unlock may return an error.
+
+If a lock with a given name does not exist when `Lock` is called, one is
+created.
+Lock references are automatically cleaned up on `Unlock` if nothing else is
+waiting for the lock.
+*/
+package locker
+
+import (
+	"errors"
+	"sync"
+	"sync/atomic"
+)
+
+// ErrNoSuchLock is returned when the requested lock does not exist
+var ErrNoSuchLock = errors.New("no such lock")
+
+// Locker provides a locking mechanism based on the passed in reference name
+type Locker struct {
+	mu    sync.Mutex
+	locks map[string]*lockCtr
+}
+
+// lockCtr is used by Locker to represent a lock with a given name.
+type lockCtr struct {
+	mu sync.Mutex
+	// waiters is the number of waiters waiting to acquire the lock
+	// this is int32 instead of uint32 so we can add `-1` in `dec()`
+	waiters int32
+}
+
+// inc increments the number of waiters waiting for the lock
+func (l *lockCtr) inc() {
+	atomic.AddInt32(&l.waiters, 1)
+}
+
+// dec decrements the number of waiters waiting on the lock
+func (l *lockCtr) dec() {
+	atomic.AddInt32(&l.waiters, -1)
+}
+
+// count gets the current number of waiters
+func (l *lockCtr) count() int32 {
+	return atomic.LoadInt32(&l.waiters)
+}
+
+// Lock locks the mutex
+func (l *lockCtr) Lock() {
+	l.mu.Lock()
+}
+
+// Unlock unlocks the mutex
+func (l *lockCtr) Unlock() {
+	l.mu.Unlock()
+}
+
+// New creates a new Locker
+func New() *Locker {
+	return &Locker{
+		locks: make(map[string]*lockCtr),
+	}
+}
+
+// Lock locks a mutex with the given name.
If it doesn't exist, one is created +func (l *Locker) Lock(name string) { + l.mu.Lock() + if l.locks == nil { + l.locks = make(map[string]*lockCtr) + } + + nameLock, exists := l.locks[name] + if !exists { + nameLock = &lockCtr{} + l.locks[name] = nameLock + } + + // increment the nameLock waiters while inside the main mutex + // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently + nameLock.inc() + l.mu.Unlock() + + // Lock the nameLock outside the main mutex so we don't block other operations + // once locked then we can decrement the number of waiters for this lock + nameLock.Lock() + nameLock.dec() +} + +// Unlock unlocks the mutex with the given name +// If the given lock is not being waited on by any other callers, it is deleted +func (l *Locker) Unlock(name string) error { + l.mu.Lock() + nameLock, exists := l.locks[name] + if !exists { + l.mu.Unlock() + return ErrNoSuchLock + } + + if nameLock.count() == 0 { + delete(l.locks, name) + } + nameLock.Unlock() + + l.mu.Unlock() + return nil +} diff --git a/vendor/github.com/moby/term/.gitignore b/vendor/github.com/moby/term/.gitignore new file mode 100644 index 0000000000..b0747ff010 --- /dev/null +++ b/vendor/github.com/moby/term/.gitignore @@ -0,0 +1,8 @@ +# if you want to ignore files created by your editor/tools, consider using a +# global .gitignore or .git/info/exclude see https://help.github.com/articles/ignoring-files +.* +!.github +!.gitignore +profile.out +# support running go modules in vendor mode for local development +vendor/ diff --git a/vendor/github.com/moby/term/LICENSE b/vendor/github.com/moby/term/LICENSE new file mode 100644 index 0000000000..6d8d58fb67 --- /dev/null +++ b/vendor/github.com/moby/term/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2013-2018 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/moby/term/README.md b/vendor/github.com/moby/term/README.md new file mode 100644 index 0000000000..0ce92cc339 --- /dev/null +++ b/vendor/github.com/moby/term/README.md @@ -0,0 +1,36 @@ +# term - utilities for dealing with terminals + +![Test](https://github.com/moby/term/workflows/Test/badge.svg) [![GoDoc](https://godoc.org/github.com/moby/term?status.svg)](https://godoc.org/github.com/moby/term) [![Go Report Card](https://goreportcard.com/badge/github.com/moby/term)](https://goreportcard.com/report/github.com/moby/term) + +term provides structures and helper functions to work with terminal (state, sizes). + +#### Using term + +```go +package main + +import ( + "log" + "os" + + "github.com/moby/term" +) + +func main() { + fd := os.Stdin.Fd() + if term.IsTerminal(fd) { + ws, err := term.GetWinsize(fd) + if err != nil { + log.Fatalf("term.GetWinsize: %s", err) + } + log.Printf("%d:%d\n", ws.Height, ws.Width) + } +} +``` + +## Contributing + +Want to hack on term? [Docker's contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md) apply. + +## Copyright and license +Code and documentation copyright 2015 Docker, inc. Code released under the Apache 2.0 license. Docs released under Creative commons. 
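+
+As a sketch of the raw-mode round trip (assuming a Unix-like terminal;
+`SetRawTerminal` and `RestoreTerminal` are shown in term.go below):
+
+```go
+fd := os.Stdin.Fd()
+state, err := term.SetRawTerminal(fd)
+if err != nil {
+	log.Fatalf("term.SetRawTerminal: %s", err)
+}
+// Input now arrives byte-by-byte with echo disabled.
+defer term.RestoreTerminal(fd, state)
+```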
diff --git a/vendor/github.com/moby/term/ascii.go b/vendor/github.com/moby/term/ascii.go
new file mode 100644
index 0000000000..55873c0556
--- /dev/null
+++ b/vendor/github.com/moby/term/ascii.go
@@ -0,0 +1,66 @@
+package term
+
+import (
+	"fmt"
+	"strings"
+)
+
+// ASCII lists the supported ASCII key sequences
+var ASCII = []string{
+	"ctrl-@",
+	"ctrl-a",
+	"ctrl-b",
+	"ctrl-c",
+	"ctrl-d",
+	"ctrl-e",
+	"ctrl-f",
+	"ctrl-g",
+	"ctrl-h",
+	"ctrl-i",
+	"ctrl-j",
+	"ctrl-k",
+	"ctrl-l",
+	"ctrl-m",
+	"ctrl-n",
+	"ctrl-o",
+	"ctrl-p",
+	"ctrl-q",
+	"ctrl-r",
+	"ctrl-s",
+	"ctrl-t",
+	"ctrl-u",
+	"ctrl-v",
+	"ctrl-w",
+	"ctrl-x",
+	"ctrl-y",
+	"ctrl-z",
+	"ctrl-[",
+	"ctrl-\\",
+	"ctrl-]",
+	"ctrl-^",
+	"ctrl-_",
+}
+
+// ToBytes converts a string representing a comma-separated list of key
+// sequences into the corresponding ASCII codes.
+func ToBytes(keys string) ([]byte, error) {
+	codes := []byte{}
+next:
+	for _, key := range strings.Split(keys, ",") {
+		if len(key) != 1 {
+			for code, ctrl := range ASCII {
+				if ctrl == key {
+					codes = append(codes, byte(code))
+					continue next
+				}
+			}
+			if key == "DEL" {
+				codes = append(codes, 127)
+			} else {
+				return nil, fmt.Errorf("Unknown character: '%s'", key)
+			}
+		} else {
+			codes = append(codes, key[0])
+		}
+	}
+	return codes, nil
+}
diff --git a/vendor/github.com/moby/term/proxy.go b/vendor/github.com/moby/term/proxy.go
new file mode 100644
index 0000000000..c47756b89a
--- /dev/null
+++ b/vendor/github.com/moby/term/proxy.go
@@ -0,0 +1,88 @@
+package term
+
+import (
+	"io"
+)
+
+// EscapeError is a special error returned by a TTY proxy reader's Read()
+// method when its detach escape sequence is read.
+type EscapeError struct{}
+
+func (EscapeError) Error() string {
+	return "read escape sequence"
+}
+
+// escapeProxy is used only for attaches with a TTY. It is used to proxy
+// stdin keypresses from the underlying reader and look for the passed in
+// escape key sequence to signal a detach.
+type escapeProxy struct {
+	escapeKeys   []byte
+	escapeKeyPos int
+	r            io.Reader
+	buf          []byte
+}
+
+// NewEscapeProxy returns a new TTY proxy reader which wraps the given reader
+// and detects when the specified escape keys are read, in which case the Read
+// method will return an error of type EscapeError.
+func NewEscapeProxy(r io.Reader, escapeKeys []byte) io.Reader {
+	return &escapeProxy{
+		escapeKeys: escapeKeys,
+		r:          r,
+	}
+}
+
+func (r *escapeProxy) Read(buf []byte) (n int, err error) {
+	if len(r.escapeKeys) > 0 && r.escapeKeyPos == len(r.escapeKeys) {
+		return 0, EscapeError{}
+	}
+
+	if len(r.buf) > 0 {
+		n = copy(buf, r.buf)
+		r.buf = r.buf[n:]
+	}
+
+	nr, err := r.r.Read(buf[n:])
+	n += nr
+	if len(r.escapeKeys) == 0 {
+		return n, err
+	}
+
+	for i := 0; i < n; i++ {
+		if buf[i] == r.escapeKeys[r.escapeKeyPos] {
+			r.escapeKeyPos++
+
+			// Check if the full escape sequence is matched.
+			if r.escapeKeyPos == len(r.escapeKeys) {
+				n = i + 1 - r.escapeKeyPos
+				if n < 0 {
+					n = 0
+				}
+				return n, EscapeError{}
+			}
+			continue
+		}
+
+		// If we need to prepend a partial escape sequence from the previous
+		// read, make sure the new buffer size doesn't exceed len(buf).
+		// Otherwise, preserve any extra data in a buffer for the next read.
+		if i < r.escapeKeyPos {
+			preserve := make([]byte, 0, r.escapeKeyPos+n)
+			preserve = append(preserve, r.escapeKeys[:r.escapeKeyPos]...)
+			preserve = append(preserve, buf[:n]...)
+			n = copy(buf, preserve)
+			i += r.escapeKeyPos
+			r.buf = append(r.buf, preserve[n:]...)
+ } + r.escapeKeyPos = 0 + } + + // If we're in the middle of reading an escape sequence, make sure we don't + // let the caller read it. If later on we find that this is not the escape + // sequence, we'll prepend it back to buf. + n -= r.escapeKeyPos + if n < 0 { + n = 0 + } + return n, err +} diff --git a/vendor/github.com/moby/term/tc.go b/vendor/github.com/moby/term/tc.go new file mode 100644 index 0000000000..65556027a6 --- /dev/null +++ b/vendor/github.com/moby/term/tc.go @@ -0,0 +1,19 @@ +// +build !windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +func tcget(fd uintptr) (*Termios, error) { + p, err := unix.IoctlGetTermios(int(fd), getTermios) + if err != nil { + return nil, err + } + return p, nil +} + +func tcset(fd uintptr, p *Termios) error { + return unix.IoctlSetTermios(int(fd), setTermios, p) +} diff --git a/vendor/github.com/moby/term/term.go b/vendor/github.com/moby/term/term.go new file mode 100644 index 0000000000..29c6acf1c7 --- /dev/null +++ b/vendor/github.com/moby/term/term.go @@ -0,0 +1,120 @@ +// +build !windows + +// Package term provides structures and helper functions to work with +// terminal (state, sizes). +package term + +import ( + "errors" + "fmt" + "io" + "os" + "os/signal" + + "golang.org/x/sys/unix" +) + +var ( + // ErrInvalidState is returned if the state of the terminal is invalid. + ErrInvalidState = errors.New("Invalid terminal state") +) + +// State represents the state of the terminal. +type State struct { + termios Termios +} + +// Winsize represents the size of the terminal window. +type Winsize struct { + Height uint16 + Width uint16 + x uint16 + y uint16 +} + +// StdStreams returns the standard streams (stdin, stdout, stderr). +func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { + return os.Stdin, os.Stdout, os.Stderr +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + var inFd uintptr + var isTerminalIn bool + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminalIn = IsTerminal(inFd) + } + return inFd, isTerminalIn +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + _, err := tcget(fd) + return err == nil +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + if state == nil { + return ErrInvalidState + } + return tcset(fd, &state.termios) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + termios, err := tcget(fd) + if err != nil { + return nil, err + } + return &State{termios: *termios}, nil +} + +// DisableEcho applies the specified state to the terminal connected to the file +// descriptor, with echo disabled. +func DisableEcho(fd uintptr, state *State) error { + newState := state.termios + newState.Lflag &^= unix.ECHO + + if err := tcset(fd, &newState); err != nil { + return err + } + handleInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. 
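+// The returned State should later be passed to RestoreTerminal to put the
+// terminal back into its original mode.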
+func SetRawTerminal(fd uintptr) (*State, error) {
+	oldState, err := MakeRaw(fd)
+	if err != nil {
+		return nil, err
+	}
+	handleInterrupt(fd, oldState)
+	return oldState, err
+}
+
+// SetRawTerminalOutput puts the output of terminal connected to the given file
+// descriptor into raw mode. On UNIX, this does nothing and returns nil for the
+// state. On Windows, it disables LF -> CRLF translation.
+func SetRawTerminalOutput(fd uintptr) (*State, error) {
+	return nil, nil
+}
+
+func handleInterrupt(fd uintptr, state *State) {
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, os.Interrupt)
+	go func() {
+		for range sigchan {
+			// quit cleanly, making sure the next prompt starts on a new line
+			fmt.Println()
+			signal.Stop(sigchan)
+			close(sigchan)
+			RestoreTerminal(fd, state)
+			os.Exit(1)
+		}
+	}()
+}
diff --git a/vendor/github.com/moby/term/term_windows.go b/vendor/github.com/moby/term/term_windows.go
new file mode 100644
index 0000000000..ba82960d4a
--- /dev/null
+++ b/vendor/github.com/moby/term/term_windows.go
@@ -0,0 +1,231 @@
+package term
+
+import (
+	"io"
+	"os"
+	"os/signal"
+
+	windowsconsole "github.com/moby/term/windows"
+	"golang.org/x/sys/windows"
+)
+
+// State holds the console mode for the terminal.
+type State struct {
+	mode uint32
+}
+
+// Winsize is used for window size.
+type Winsize struct {
+	Height uint16
+	Width  uint16
+}
+
+// vtInputSupported is true if winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported by the console
+var vtInputSupported bool
+
+// StdStreams returns the standard streams (stdin, stdout, stderr).
+func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) {
+	// Turn on VT handling on all std handles, if possible. This might
+	// fail, in which case we will fall back to terminal emulation.
+	var (
+		emulateStdin, emulateStdout, emulateStderr bool
+
+		mode uint32
+	)
+
+	fd := windows.Handle(os.Stdin.Fd())
+	if err := windows.GetConsoleMode(fd, &mode); err == nil {
+		// Validate that winterm.ENABLE_VIRTUAL_TERMINAL_INPUT is supported, but do not set it.
+		if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_INPUT); err != nil {
+			emulateStdin = true
+		} else {
+			vtInputSupported = true
+		}
+		// Unconditionally set the console mode back even on failure because SetConsoleMode
+		// remembers invalid bits on input handles.
+		_ = windows.SetConsoleMode(fd, mode)
+	}
+
+	fd = windows.Handle(os.Stdout.Fd())
+	if err := windows.GetConsoleMode(fd, &mode); err == nil {
+		// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+		if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
+			emulateStdout = true
+		} else {
+			_ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+		}
+	}
+
+	fd = windows.Handle(os.Stderr.Fd())
+	if err := windows.GetConsoleMode(fd, &mode); err == nil {
+		// Validate winterm.DISABLE_NEWLINE_AUTO_RETURN is supported, but do not set it.
+		if err = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING|windows.DISABLE_NEWLINE_AUTO_RETURN); err != nil {
+			emulateStderr = true
+		} else {
+			_ = windows.SetConsoleMode(fd, mode|windows.ENABLE_VIRTUAL_TERMINAL_PROCESSING)
+		}
+	}
+
+	// Temporarily use STD_INPUT_HANDLE, STD_OUTPUT_HANDLE and
+	// STD_ERROR_HANDLE from syscall rather than x/sys/windows as long as
+	// go-ansiterm hasn't switched to x/sys/windows.
+ // TODO: switch back to x/sys/windows once go-ansiterm has switched + if emulateStdin { + h := uint32(windows.STD_INPUT_HANDLE) + stdIn = windowsconsole.NewAnsiReader(int(h)) + } else { + stdIn = os.Stdin + } + + if emulateStdout { + h := uint32(windows.STD_OUTPUT_HANDLE) + stdOut = windowsconsole.NewAnsiWriter(int(h)) + } else { + stdOut = os.Stdout + } + + if emulateStderr { + h := uint32(windows.STD_ERROR_HANDLE) + stdErr = windowsconsole.NewAnsiWriter(int(h)) + } else { + stdErr = os.Stderr + } + + return +} + +// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. +func GetFdInfo(in interface{}) (uintptr, bool) { + return windowsconsole.GetHandleInfo(in) +} + +// GetWinsize returns the window size based on the specified file descriptor. +func GetWinsize(fd uintptr) (*Winsize, error) { + var info windows.ConsoleScreenBufferInfo + if err := windows.GetConsoleScreenBufferInfo(windows.Handle(fd), &info); err != nil { + return nil, err + } + + winsize := &Winsize{ + Width: uint16(info.Window.Right - info.Window.Left + 1), + Height: uint16(info.Window.Bottom - info.Window.Top + 1), + } + + return winsize, nil +} + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal(fd uintptr) bool { + var mode uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &mode) + return err == nil +} + +// RestoreTerminal restores the terminal connected to the given file descriptor +// to a previous state. +func RestoreTerminal(fd uintptr, state *State) error { + return windows.SetConsoleMode(windows.Handle(fd), state.mode) +} + +// SaveState saves the state of the terminal connected to the given file descriptor. +func SaveState(fd uintptr) (*State, error) { + var mode uint32 + + if err := windows.GetConsoleMode(windows.Handle(fd), &mode); err != nil { + return nil, err + } + + return &State{mode: mode}, nil +} + +// DisableEcho disables echo for the terminal connected to the given file descriptor. +// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx +func DisableEcho(fd uintptr, state *State) error { + mode := state.mode + mode &^= windows.ENABLE_ECHO_INPUT + mode |= windows.ENABLE_PROCESSED_INPUT | windows.ENABLE_LINE_INPUT + err := windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return nil +} + +// SetRawTerminal puts the terminal connected to the given file descriptor into +// raw mode and returns the previous state. On UNIX, this puts both the input +// and output into raw mode. On Windows, it only puts the input into raw mode. +func SetRawTerminal(fd uintptr) (*State, error) { + state, err := MakeRaw(fd) + if err != nil { + return nil, err + } + + // Register an interrupt handler to catch and restore prior state + restoreAtInterrupt(fd, state) + return state, err +} + +// SetRawTerminalOutput puts the output of terminal connected to the given file +// descriptor into raw mode. On UNIX, this does nothing and returns nil for the +// state. On Windows, it disables LF -> CRLF translation. +func SetRawTerminalOutput(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + // Ignore failures, since winterm.DISABLE_NEWLINE_AUTO_RETURN might not be supported on this + // version of Windows. 
+ _ = windows.SetConsoleMode(windows.Handle(fd), state.mode|windows.DISABLE_NEWLINE_AUTO_RETURN) + return state, err +} + +// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be restored. +func MakeRaw(fd uintptr) (*State, error) { + state, err := SaveState(fd) + if err != nil { + return nil, err + } + + mode := state.mode + + // See + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx + // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx + + // Disable these modes + mode &^= windows.ENABLE_ECHO_INPUT + mode &^= windows.ENABLE_LINE_INPUT + mode &^= windows.ENABLE_MOUSE_INPUT + mode &^= windows.ENABLE_WINDOW_INPUT + mode &^= windows.ENABLE_PROCESSED_INPUT + + // Enable these modes + mode |= windows.ENABLE_EXTENDED_FLAGS + mode |= windows.ENABLE_INSERT_MODE + mode |= windows.ENABLE_QUICK_EDIT_MODE + if vtInputSupported { + mode |= windows.ENABLE_VIRTUAL_TERMINAL_INPUT + } + + err = windows.SetConsoleMode(windows.Handle(fd), mode) + if err != nil { + return nil, err + } + return state, nil +} + +func restoreAtInterrupt(fd uintptr, state *State) { + sigchan := make(chan os.Signal, 1) + signal.Notify(sigchan, os.Interrupt) + + go func() { + _ = <-sigchan + _ = RestoreTerminal(fd, state) + os.Exit(0) + }() +} diff --git a/vendor/github.com/moby/term/termios.go b/vendor/github.com/moby/term/termios.go new file mode 100644 index 0000000000..0f028e2273 --- /dev/null +++ b/vendor/github.com/moby/term/termios.go @@ -0,0 +1,35 @@ +// +build !windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +// Termios is the Unix API for terminal I/O. +type Termios = unix.Termios + +// MakeRaw puts the terminal connected to the given file descriptor into raw +// mode and returns the previous state of the terminal so that it can be +// restored. 
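+// In raw mode, echoing, canonical line buffering, signal generation, and
+// output post-processing are disabled, and reads return as soon as a single
+// byte is available (VMIN=1, VTIME=0).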
+func MakeRaw(fd uintptr) (*State, error) { + termios, err := tcget(fd) + if err != nil { + return nil, err + } + + oldState := State{termios: *termios} + + termios.Iflag &^= (unix.IGNBRK | unix.BRKINT | unix.PARMRK | unix.ISTRIP | unix.INLCR | unix.IGNCR | unix.ICRNL | unix.IXON) + termios.Oflag &^= unix.OPOST + termios.Lflag &^= (unix.ECHO | unix.ECHONL | unix.ICANON | unix.ISIG | unix.IEXTEN) + termios.Cflag &^= (unix.CSIZE | unix.PARENB) + termios.Cflag |= unix.CS8 + termios.Cc[unix.VMIN] = 1 + termios.Cc[unix.VTIME] = 0 + + if err := tcset(fd, termios); err != nil { + return nil, err + } + return &oldState, nil +} diff --git a/vendor/github.com/moby/term/termios_bsd.go b/vendor/github.com/moby/term/termios_bsd.go new file mode 100644 index 0000000000..922dd4baab --- /dev/null +++ b/vendor/github.com/moby/term/termios_bsd.go @@ -0,0 +1,12 @@ +// +build darwin freebsd openbsd netbsd + +package term + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TIOCGETA + setTermios = unix.TIOCSETA +) diff --git a/vendor/github.com/moby/term/termios_nonbsd.go b/vendor/github.com/moby/term/termios_nonbsd.go new file mode 100644 index 0000000000..038fd61ba1 --- /dev/null +++ b/vendor/github.com/moby/term/termios_nonbsd.go @@ -0,0 +1,12 @@ +//+build !darwin,!freebsd,!netbsd,!openbsd,!windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +const ( + getTermios = unix.TCGETS + setTermios = unix.TCSETS +) diff --git a/vendor/github.com/moby/term/windows/ansi_reader.go b/vendor/github.com/moby/term/windows/ansi_reader.go new file mode 100644 index 0000000000..155251521b --- /dev/null +++ b/vendor/github.com/moby/term/windows/ansi_reader.go @@ -0,0 +1,252 @@ +// +build windows + +package windowsconsole + +import ( + "bytes" + "errors" + "fmt" + "io" + "os" + "strings" + "unsafe" + + ansiterm "github.com/Azure/go-ansiterm" + "github.com/Azure/go-ansiterm/winterm" +) + +const ( + escapeSequence = ansiterm.KEY_ESC_CSI +) + +// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. +type ansiReader struct { + file *os.File + fd uintptr + buffer []byte + cbBuffer int + command []byte +} + +// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a +// Windows console input handle. +func NewAnsiReader(nFile int) io.ReadCloser { + file, fd := winterm.GetStdFile(nFile) + return &ansiReader{ + file: file, + fd: fd, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + buffer: make([]byte, 0), + } +} + +// Close closes the wrapped file. +func (ar *ansiReader) Close() (err error) { + return ar.file.Close() +} + +// Fd returns the file descriptor of the wrapped file. +func (ar *ansiReader) Fd() uintptr { + return ar.fd +} + +// Read reads up to len(p) bytes of translated input events into p. 
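+// Any translated bytes that do not fit into p are buffered internally and
+// returned by subsequent calls to Read.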
+func (ar *ansiReader) Read(p []byte) (int, error) { + if len(p) == 0 { + return 0, nil + } + + // Previously read bytes exist, read as much as we can and return + if len(ar.buffer) > 0 { + originalLength := len(ar.buffer) + copiedLength := copy(p, ar.buffer) + + if copiedLength == originalLength { + ar.buffer = make([]byte, 0, len(p)) + } else { + ar.buffer = ar.buffer[copiedLength:] + } + + return copiedLength, nil + } + + // Read and translate key events + events, err := readInputEvents(ar, len(p)) + if err != nil { + return 0, err + } else if len(events) == 0 { + return 0, nil + } + + keyBytes := translateKeyEvents(events, []byte(escapeSequence)) + + // Save excess bytes and right-size keyBytes + if len(keyBytes) > len(p) { + ar.buffer = keyBytes[len(p):] + keyBytes = keyBytes[:len(p)] + } else if len(keyBytes) == 0 { + return 0, nil + } + + copiedLength := copy(p, keyBytes) + if copiedLength != len(keyBytes) { + return 0, errors.New("unexpected copy length encountered") + } + + return copiedLength, nil +} + +// readInputEvents polls until at least one event is available. +func readInputEvents(ar *ansiReader, maxBytes int) ([]winterm.INPUT_RECORD, error) { + // Determine the maximum number of records to retrieve + // -- Cast around the type system to obtain the size of a single INPUT_RECORD. + // unsafe.Sizeof requires an expression vs. a type-reference; the casting + // tricks the type system into believing it has such an expression. + recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) + countRecords := maxBytes / recordSize + if countRecords > ansiterm.MAX_INPUT_EVENTS { + countRecords = ansiterm.MAX_INPUT_EVENTS + } else if countRecords == 0 { + countRecords = 1 + } + + // Wait for and read input events + events := make([]winterm.INPUT_RECORD, countRecords) + nEvents := uint32(0) + eventsExist, err := winterm.WaitForSingleObject(ar.fd, winterm.WAIT_INFINITE) + if err != nil { + return nil, err + } + + if eventsExist { + err = winterm.ReadConsoleInput(ar.fd, events, &nEvents) + if err != nil { + return nil, err + } + } + + // Return a slice restricted to the number of returned records + return events[:nEvents], nil +} + +// KeyEvent Translation Helpers + +var arrowKeyMapPrefix = map[uint16]string{ + winterm.VK_UP: "%s%sA", + winterm.VK_DOWN: "%s%sB", + winterm.VK_RIGHT: "%s%sC", + winterm.VK_LEFT: "%s%sD", +} + +var keyMapPrefix = map[uint16]string{ + winterm.VK_UP: "\x1B[%sA", + winterm.VK_DOWN: "\x1B[%sB", + winterm.VK_RIGHT: "\x1B[%sC", + winterm.VK_LEFT: "\x1B[%sD", + winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 + winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 + winterm.VK_INSERT: "\x1B[2%s~", + winterm.VK_DELETE: "\x1B[3%s~", + winterm.VK_PRIOR: "\x1B[5%s~", + winterm.VK_NEXT: "\x1B[6%s~", + winterm.VK_F1: "", + winterm.VK_F2: "", + winterm.VK_F3: "\x1B[13%s~", + winterm.VK_F4: "\x1B[14%s~", + winterm.VK_F5: "\x1B[15%s~", + winterm.VK_F6: "\x1B[17%s~", + winterm.VK_F7: "\x1B[18%s~", + winterm.VK_F8: "\x1B[19%s~", + winterm.VK_F9: "\x1B[20%s~", + winterm.VK_F10: "\x1B[21%s~", + winterm.VK_F11: "\x1B[23%s~", + winterm.VK_F12: "\x1B[24%s~", +} + +// translateKeyEvents converts the input events into the appropriate ANSI string. 
+func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte {
+	var buffer bytes.Buffer
+	for _, event := range events {
+		if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 {
+			buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence))
+		}
+	}
+
+	return buffer.Bytes()
+}
+
+// keyToString maps the given input event record to the corresponding string.
+func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string {
+	if keyEvent.UnicodeChar == 0 {
+		return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence)
+	}
+
+	_, alt, control := getControlKeys(keyEvent.ControlKeyState)
+	if control {
+		// TODO(azlinux): Implement following control sequences
+		// <Ctrl>-D  Signals the end of input from the keyboard; also exits current shell.
+		// <Ctrl>-H  Deletes the first character to the left of the cursor. Also called the ERASE key.
+		// <Ctrl>-Q  Restarts printing after it has been stopped with <Ctrl>-s.
+		// <Ctrl>-S  Suspends printing on the screen (does not stop the program).
+		// <Ctrl>-U  Deletes all characters on the current line. Also called the KILL key.
+		// <Ctrl>-E  Quits current command and creates a core

+	}
+
+	// <Alt>+Key generates ESC N Key
+	if !control && alt {
+		return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar))
+	}
+
+	return string(keyEvent.UnicodeChar)
+}
+
+// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
+func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string {
+	shift, alt, control := getControlKeys(controlState)
+	modifier := getControlKeysModifier(shift, alt, control)
+
+	if format, ok := arrowKeyMapPrefix[key]; ok {
+		return fmt.Sprintf(format, escapeSequence, modifier)
+	}
+
+	if format, ok := keyMapPrefix[key]; ok {
+		return fmt.Sprintf(format, modifier)
+	}
+
+	return ""
+}
+
+// getControlKeys extracts the shift, alt, and ctrl key states.
+func getControlKeys(controlState uint32) (shift, alt, control bool) {
+	shift = 0 != (controlState & winterm.SHIFT_PRESSED)
+	alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED))
+	control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED))
+	return shift, alt, control
+}
+
+// getControlKeysModifier returns the ANSI modifier for the given combination of control keys.
+func getControlKeysModifier(shift, alt, control bool) string {
+	if shift && alt && control {
+		return ansiterm.KEY_CONTROL_PARAM_8
+	}
+	if alt && control {
+		return ansiterm.KEY_CONTROL_PARAM_7
+	}
+	if shift && control {
+		return ansiterm.KEY_CONTROL_PARAM_6
+	}
+	if control {
+		return ansiterm.KEY_CONTROL_PARAM_5
+	}
+	if shift && alt {
+		return ansiterm.KEY_CONTROL_PARAM_4
+	}
+	if alt {
+		return ansiterm.KEY_CONTROL_PARAM_3
+	}
+	if shift {
+		return ansiterm.KEY_CONTROL_PARAM_2
+	}
+	return ""
+}
diff --git a/vendor/github.com/moby/term/windows/ansi_writer.go b/vendor/github.com/moby/term/windows/ansi_writer.go
new file mode 100644
index 0000000000..ccb5ef0775
--- /dev/null
+++ b/vendor/github.com/moby/term/windows/ansi_writer.go
@@ -0,0 +1,56 @@
+// +build windows
+
+package windowsconsole
+
+import (
+	"io"
+	"os"
+
+	ansiterm "github.com/Azure/go-ansiterm"
+	"github.com/Azure/go-ansiterm/winterm"
+)
+
+// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation.
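+// Bytes written are run through an ANSI parser and replayed as the
+// equivalent Windows Console API calls via the winterm event handler.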
+type ansiWriter struct { + file *os.File + fd uintptr + infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO + command []byte + escapeSequence []byte + inAnsiSequence bool + parser *ansiterm.AnsiParser +} + +// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a +// Windows console output handle. +func NewAnsiWriter(nFile int) io.Writer { + file, fd := winterm.GetStdFile(nFile) + info, err := winterm.GetConsoleScreenBufferInfo(fd) + if err != nil { + return nil + } + + parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) + + return &ansiWriter{ + file: file, + fd: fd, + infoReset: info, + command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), + escapeSequence: []byte(ansiterm.KEY_ESC_CSI), + parser: parser, + } +} + +func (aw *ansiWriter) Fd() uintptr { + return aw.fd +} + +// Write writes len(p) bytes from p to the underlying data stream. +func (aw *ansiWriter) Write(p []byte) (total int, err error) { + if len(p) == 0 { + return 0, nil + } + + return aw.parser.Parse(p) +} diff --git a/vendor/github.com/moby/term/windows/console.go b/vendor/github.com/moby/term/windows/console.go new file mode 100644 index 0000000000..993694ddcd --- /dev/null +++ b/vendor/github.com/moby/term/windows/console.go @@ -0,0 +1,39 @@ +// +build windows + +package windowsconsole + +import ( + "os" + + "golang.org/x/sys/windows" +) + +// GetHandleInfo returns file descriptor and bool indicating whether the file is a console. +func GetHandleInfo(in interface{}) (uintptr, bool) { + switch t := in.(type) { + case *ansiReader: + return t.Fd(), true + case *ansiWriter: + return t.Fd(), true + } + + var inFd uintptr + var isTerminal bool + + if file, ok := in.(*os.File); ok { + inFd = file.Fd() + isTerminal = isConsole(inFd) + } + return inFd, isTerminal +} + +// IsConsole returns true if the given file descriptor is a Windows Console. +// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. +// Deprecated: use golang.org/x/sys/windows.GetConsoleMode() or golang.org/x/term.IsTerminal() +var IsConsole = isConsole + +func isConsole(fd uintptr) bool { + var mode uint32 + err := windows.GetConsoleMode(windows.Handle(fd), &mode) + return err == nil +} diff --git a/vendor/github.com/moby/term/windows/doc.go b/vendor/github.com/moby/term/windows/doc.go new file mode 100644 index 0000000000..54265fffaf --- /dev/null +++ b/vendor/github.com/moby/term/windows/doc.go @@ -0,0 +1,5 @@ +// These files implement ANSI-aware input and output streams for use by the Docker Windows client. +// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create +// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. + +package windowsconsole diff --git a/vendor/github.com/moby/term/winsize.go b/vendor/github.com/moby/term/winsize.go new file mode 100644 index 0000000000..1ef98d5996 --- /dev/null +++ b/vendor/github.com/moby/term/winsize.go @@ -0,0 +1,20 @@ +// +build !windows + +package term + +import ( + "golang.org/x/sys/unix" +) + +// GetWinsize returns the window size based on the specified file descriptor. 
+func GetWinsize(fd uintptr) (*Winsize, error) { + uws, err := unix.IoctlGetWinsize(int(fd), unix.TIOCGWINSZ) + ws := &Winsize{Height: uws.Row, Width: uws.Col, x: uws.Xpixel, y: uws.Ypixel} + return ws, err +} + +// SetWinsize tries to set the specified window size for the specified file descriptor. +func SetWinsize(fd uintptr, ws *Winsize) error { + uws := &unix.Winsize{Row: ws.Height, Col: ws.Width, Xpixel: ws.x, Ypixel: ws.y} + return unix.IoctlSetWinsize(int(fd), unix.TIOCSWINSZ, uws) +} diff --git a/vendor/github.com/morikuni/aec/LICENSE b/vendor/github.com/morikuni/aec/LICENSE new file mode 100644 index 0000000000..1c26401641 --- /dev/null +++ b/vendor/github.com/morikuni/aec/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Taihei Morikuni + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/morikuni/aec/README.md b/vendor/github.com/morikuni/aec/README.md new file mode 100644 index 0000000000..3cbc4343ee --- /dev/null +++ b/vendor/github.com/morikuni/aec/README.md @@ -0,0 +1,178 @@ +# aec + +[![GoDoc](https://godoc.org/github.com/morikuni/aec?status.svg)](https://godoc.org/github.com/morikuni/aec) + +Go wrapper for ANSI escape code. + +## Install + +```bash +go get github.com/morikuni/aec +``` + +## Features + +ANSI escape codes depend on terminal environment. +Some of these features may not work. +Check supported Font-Style/Font-Color features with [checkansi](./checkansi). + +[Wikipedia](https://en.wikipedia.org/wiki/ANSI_escape_code) for more detail. + +### Cursor + +- `Up(n)` +- `Down(n)` +- `Right(n)` +- `Left(n)` +- `NextLine(n)` +- `PreviousLine(n)` +- `Column(col)` +- `Position(row, col)` +- `Save` +- `Restore` +- `Hide` +- `Show` +- `Report` + +### Erase + +- `EraseDisplay(mode)` +- `EraseLine(mode)` + +### Scroll + +- `ScrollUp(n)` +- `ScrollDown(n)` + +### Font Style + +- `Bold` +- `Faint` +- `Italic` +- `Underline` +- `BlinkSlow` +- `BlinkRapid` +- `Inverse` +- `Conceal` +- `CrossOut` +- `Frame` +- `Encircle` +- `Overline` + +### Font Color + +Foreground color. + +- `DefaultF` +- `BlackF` +- `RedF` +- `GreenF` +- `YellowF` +- `BlueF` +- `MagentaF` +- `CyanF` +- `WhiteF` +- `LightBlackF` +- `LightRedF` +- `LightGreenF` +- `LightYellowF` +- `LightBlueF` +- `LightMagentaF` +- `LightCyanF` +- `LightWhiteF` +- `Color3BitF(color)` +- `Color8BitF(color)` +- `FullColorF(r, g, b)` + +Background color. 
+ +- `DefaultB` +- `BlackB` +- `RedB` +- `GreenB` +- `YellowB` +- `BlueB` +- `MagentaB` +- `CyanB` +- `WhiteB` +- `LightBlackB` +- `LightRedB` +- `LightGreenB` +- `LightYellowB` +- `LightBlueB` +- `LightMagentaB` +- `LightCyanB` +- `LightWhiteB` +- `Color3BitB(color)` +- `Color8BitB(color)` +- `FullColorB(r, g, b)` + +### Color Converter + +24bit RGB color to ANSI color. + +- `NewRGB3Bit(r, g, b)` +- `NewRGB8Bit(r, g, b)` + +### Builder + +To mix these features. + +```go +custom := aec.EmptyBuilder.Right(2).RGB8BitF(128, 255, 64).RedB().ANSI +custom.Apply("Hello World") +``` + +## Usage + +1. Create ANSI by `aec.XXX().With(aec.YYY())` or `aec.EmptyBuilder.XXX().YYY().ANSI` +2. Print ANSI by `fmt.Print(ansi, "some string", aec.Reset)` or `fmt.Print(ansi.Apply("some string"))` + +`aec.Reset` should be added when using font style or font color features. + +## Example + +Simple progressbar. + +![sample](./sample.gif) + +```go +package main + +import ( + "fmt" + "strings" + "time" + + "github.com/morikuni/aec" +) + +func main() { + const n = 20 + builder := aec.EmptyBuilder + + up2 := aec.Up(2) + col := aec.Column(n + 2) + bar := aec.Color8BitF(aec.NewRGB8Bit(64, 255, 64)) + label := builder.LightRedF().Underline().With(col).Right(1).ANSI + + // for up2 + fmt.Println() + fmt.Println() + + for i := 0; i <= n; i++ { + fmt.Print(up2) + fmt.Println(label.Apply(fmt.Sprint(i, "/", n))) + fmt.Print("[") + fmt.Print(bar.Apply(strings.Repeat("=", i))) + fmt.Println(col.Apply("]")) + time.Sleep(100 * time.Millisecond) + } +} +``` + +## License + +[MIT](./LICENSE) + + diff --git a/vendor/github.com/morikuni/aec/aec.go b/vendor/github.com/morikuni/aec/aec.go new file mode 100644 index 0000000000..566be6eb1e --- /dev/null +++ b/vendor/github.com/morikuni/aec/aec.go @@ -0,0 +1,137 @@ +package aec + +import "fmt" + +// EraseMode is listed in a variable EraseModes. +type EraseMode uint + +var ( + // EraseModes is a list of EraseMode. + EraseModes struct { + // All erase all. + All EraseMode + + // Head erase to head. + Head EraseMode + + // Tail erase to tail. + Tail EraseMode + } + + // Save saves the cursor position. + Save ANSI + + // Restore restores the cursor position. + Restore ANSI + + // Hide hides the cursor. + Hide ANSI + + // Show shows the cursor. + Show ANSI + + // Report reports the cursor position. + Report ANSI +) + +// Up moves up the cursor. +func Up(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dA", n)) +} + +// Down moves down the cursor. +func Down(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dB", n)) +} + +// Right moves right the cursor. +func Right(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dC", n)) +} + +// Left moves left the cursor. +func Left(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dD", n)) +} + +// NextLine moves down the cursor to head of a line. +func NextLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dE", n)) +} + +// PreviousLine moves up the cursor to head of a line. +func PreviousLine(n uint) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dF", n)) +} + +// Column set the cursor position to a given column. +func Column(col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dG", col)) +} + +// Position set the cursor position to a given absolute position. 
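+// Rows and columns are 1-based; Position(1, 1) addresses the top-left corner
+// of the screen.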
+func Position(row, col uint) ANSI { + return newAnsi(fmt.Sprintf(esc+"%d;%dH", row, col)) +} + +// EraseDisplay erases display by given EraseMode. +func EraseDisplay(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dJ", m)) +} + +// EraseLine erases lines by given EraseMode. +func EraseLine(m EraseMode) ANSI { + return newAnsi(fmt.Sprintf(esc+"%dK", m)) +} + +// ScrollUp scrolls up the page. +func ScrollUp(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dS", n)) +} + +// ScrollDown scrolls down the page. +func ScrollDown(n int) ANSI { + if n == 0 { + return empty + } + return newAnsi(fmt.Sprintf(esc+"%dT", n)) +} + +func init() { + EraseModes = struct { + All EraseMode + Head EraseMode + Tail EraseMode + }{ + Tail: 0, + Head: 1, + All: 2, + } + + Save = newAnsi(esc + "s") + Restore = newAnsi(esc + "u") + Hide = newAnsi(esc + "?25l") + Show = newAnsi(esc + "?25h") + Report = newAnsi(esc + "6n") +} diff --git a/vendor/github.com/morikuni/aec/ansi.go b/vendor/github.com/morikuni/aec/ansi.go new file mode 100644 index 0000000000..e60722e6e6 --- /dev/null +++ b/vendor/github.com/morikuni/aec/ansi.go @@ -0,0 +1,59 @@ +package aec + +import ( + "fmt" + "strings" +) + +const esc = "\x1b[" + +// Reset resets SGR effect. +const Reset string = "\x1b[0m" + +var empty = newAnsi("") + +// ANSI represents ANSI escape code. +type ANSI interface { + fmt.Stringer + + // With adapts given ANSIs. + With(...ANSI) ANSI + + // Apply wraps given string in ANSI. + Apply(string) string +} + +type ansiImpl string + +func newAnsi(s string) *ansiImpl { + r := ansiImpl(s) + return &r +} + +func (a *ansiImpl) With(ansi ...ANSI) ANSI { + return concat(append([]ANSI{a}, ansi...)) +} + +func (a *ansiImpl) Apply(s string) string { + return a.String() + s + Reset +} + +func (a *ansiImpl) String() string { + return string(*a) +} + +// Apply wraps given string in ANSIs. +func Apply(s string, ansi ...ANSI) string { + if len(ansi) == 0 { + return s + } + return concat(ansi).Apply(s) +} + +func concat(ansi []ANSI) ANSI { + strs := make([]string, 0, len(ansi)) + for _, p := range ansi { + strs = append(strs, p.String()) + } + return newAnsi(strings.Join(strs, "")) +} diff --git a/vendor/github.com/morikuni/aec/builder.go b/vendor/github.com/morikuni/aec/builder.go new file mode 100644 index 0000000000..13bd002d4e --- /dev/null +++ b/vendor/github.com/morikuni/aec/builder.go @@ -0,0 +1,388 @@ +package aec + +// Builder is a lightweight syntax to construct customized ANSI. +type Builder struct { + ANSI ANSI +} + +// EmptyBuilder is an initialized Builder. +var EmptyBuilder *Builder + +// NewBuilder creates a Builder from existing ANSI. +func NewBuilder(a ...ANSI) *Builder { + return &Builder{concat(a)} +} + +// With is a syntax for With. +func (builder *Builder) With(a ...ANSI) *Builder { + return NewBuilder(builder.ANSI.With(a...)) +} + +// Up is a syntax for Up. +func (builder *Builder) Up(n uint) *Builder { + return builder.With(Up(n)) +} + +// Down is a syntax for Down. +func (builder *Builder) Down(n uint) *Builder { + return builder.With(Down(n)) +} + +// Right is a syntax for Right. +func (builder *Builder) Right(n uint) *Builder { + return builder.With(Right(n)) +} + +// Left is a syntax for Left. +func (builder *Builder) Left(n uint) *Builder { + return builder.With(Left(n)) +} + +// NextLine is a syntax for NextLine. 
+func (builder *Builder) NextLine(n uint) *Builder { + return builder.With(NextLine(n)) +} + +// PreviousLine is a syntax for PreviousLine. +func (builder *Builder) PreviousLine(n uint) *Builder { + return builder.With(PreviousLine(n)) +} + +// Column is a syntax for Column. +func (builder *Builder) Column(col uint) *Builder { + return builder.With(Column(col)) +} + +// Position is a syntax for Position. +func (builder *Builder) Position(row, col uint) *Builder { + return builder.With(Position(row, col)) +} + +// EraseDisplay is a syntax for EraseDisplay. +func (builder *Builder) EraseDisplay(m EraseMode) *Builder { + return builder.With(EraseDisplay(m)) +} + +// EraseLine is a syntax for EraseLine. +func (builder *Builder) EraseLine(m EraseMode) *Builder { + return builder.With(EraseLine(m)) +} + +// ScrollUp is a syntax for ScrollUp. +func (builder *Builder) ScrollUp(n int) *Builder { + return builder.With(ScrollUp(n)) +} + +// ScrollDown is a syntax for ScrollDown. +func (builder *Builder) ScrollDown(n int) *Builder { + return builder.With(ScrollDown(n)) +} + +// Save is a syntax for Save. +func (builder *Builder) Save() *Builder { + return builder.With(Save) +} + +// Restore is a syntax for Restore. +func (builder *Builder) Restore() *Builder { + return builder.With(Restore) +} + +// Hide is a syntax for Hide. +func (builder *Builder) Hide() *Builder { + return builder.With(Hide) +} + +// Show is a syntax for Show. +func (builder *Builder) Show() *Builder { + return builder.With(Show) +} + +// Report is a syntax for Report. +func (builder *Builder) Report() *Builder { + return builder.With(Report) +} + +// Bold is a syntax for Bold. +func (builder *Builder) Bold() *Builder { + return builder.With(Bold) +} + +// Faint is a syntax for Faint. +func (builder *Builder) Faint() *Builder { + return builder.With(Faint) +} + +// Italic is a syntax for Italic. +func (builder *Builder) Italic() *Builder { + return builder.With(Italic) +} + +// Underline is a syntax for Underline. +func (builder *Builder) Underline() *Builder { + return builder.With(Underline) +} + +// BlinkSlow is a syntax for BlinkSlow. +func (builder *Builder) BlinkSlow() *Builder { + return builder.With(BlinkSlow) +} + +// BlinkRapid is a syntax for BlinkRapid. +func (builder *Builder) BlinkRapid() *Builder { + return builder.With(BlinkRapid) +} + +// Inverse is a syntax for Inverse. +func (builder *Builder) Inverse() *Builder { + return builder.With(Inverse) +} + +// Conceal is a syntax for Conceal. +func (builder *Builder) Conceal() *Builder { + return builder.With(Conceal) +} + +// CrossOut is a syntax for CrossOut. +func (builder *Builder) CrossOut() *Builder { + return builder.With(CrossOut) +} + +// BlackF is a syntax for BlackF. +func (builder *Builder) BlackF() *Builder { + return builder.With(BlackF) +} + +// RedF is a syntax for RedF. +func (builder *Builder) RedF() *Builder { + return builder.With(RedF) +} + +// GreenF is a syntax for GreenF. +func (builder *Builder) GreenF() *Builder { + return builder.With(GreenF) +} + +// YellowF is a syntax for YellowF. +func (builder *Builder) YellowF() *Builder { + return builder.With(YellowF) +} + +// BlueF is a syntax for BlueF. +func (builder *Builder) BlueF() *Builder { + return builder.With(BlueF) +} + +// MagentaF is a syntax for MagentaF. +func (builder *Builder) MagentaF() *Builder { + return builder.With(MagentaF) +} + +// CyanF is a syntax for CyanF. +func (builder *Builder) CyanF() *Builder { + return builder.With(CyanF) +} + +// WhiteF is a syntax for WhiteF. 
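+//
+// For example, aec.EmptyBuilder.WhiteF().RedB().ANSI.Apply("alert")
+// wraps "alert" in white-foreground/red-background escape codes plus a
+// trailing reset (an illustrative sketch).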
+func (builder *Builder) WhiteF() *Builder {
+    return builder.With(WhiteF)
+}
+
+// DefaultF is a syntax for DefaultF.
+func (builder *Builder) DefaultF() *Builder {
+    return builder.With(DefaultF)
+}
+
+// BlackB is a syntax for BlackB.
+func (builder *Builder) BlackB() *Builder {
+    return builder.With(BlackB)
+}
+
+// RedB is a syntax for RedB.
+func (builder *Builder) RedB() *Builder {
+    return builder.With(RedB)
+}
+
+// GreenB is a syntax for GreenB.
+func (builder *Builder) GreenB() *Builder {
+    return builder.With(GreenB)
+}
+
+// YellowB is a syntax for YellowB.
+func (builder *Builder) YellowB() *Builder {
+    return builder.With(YellowB)
+}
+
+// BlueB is a syntax for BlueB.
+func (builder *Builder) BlueB() *Builder {
+    return builder.With(BlueB)
+}
+
+// MagentaB is a syntax for MagentaB.
+func (builder *Builder) MagentaB() *Builder {
+    return builder.With(MagentaB)
+}
+
+// CyanB is a syntax for CyanB.
+func (builder *Builder) CyanB() *Builder {
+    return builder.With(CyanB)
+}
+
+// WhiteB is a syntax for WhiteB.
+func (builder *Builder) WhiteB() *Builder {
+    return builder.With(WhiteB)
+}
+
+// DefaultB is a syntax for DefaultB.
+func (builder *Builder) DefaultB() *Builder {
+    return builder.With(DefaultB)
+}
+
+// Frame is a syntax for Frame.
+func (builder *Builder) Frame() *Builder {
+    return builder.With(Frame)
+}
+
+// Encircle is a syntax for Encircle.
+func (builder *Builder) Encircle() *Builder {
+    return builder.With(Encircle)
+}
+
+// Overline is a syntax for Overline.
+func (builder *Builder) Overline() *Builder {
+    return builder.With(Overline)
+}
+
+// LightBlackF is a syntax for LightBlackF.
+func (builder *Builder) LightBlackF() *Builder {
+    return builder.With(LightBlackF)
+}
+
+// LightRedF is a syntax for LightRedF.
+func (builder *Builder) LightRedF() *Builder {
+    return builder.With(LightRedF)
+}
+
+// LightGreenF is a syntax for LightGreenF.
+func (builder *Builder) LightGreenF() *Builder {
+    return builder.With(LightGreenF)
+}
+
+// LightYellowF is a syntax for LightYellowF.
+func (builder *Builder) LightYellowF() *Builder {
+    return builder.With(LightYellowF)
+}
+
+// LightBlueF is a syntax for LightBlueF.
+func (builder *Builder) LightBlueF() *Builder {
+    return builder.With(LightBlueF)
+}
+
+// LightMagentaF is a syntax for LightMagentaF.
+func (builder *Builder) LightMagentaF() *Builder {
+    return builder.With(LightMagentaF)
+}
+
+// LightCyanF is a syntax for LightCyanF.
+func (builder *Builder) LightCyanF() *Builder {
+    return builder.With(LightCyanF)
+}
+
+// LightWhiteF is a syntax for LightWhiteF.
+func (builder *Builder) LightWhiteF() *Builder {
+    return builder.With(LightWhiteF)
+}
+
+// LightBlackB is a syntax for LightBlackB.
+func (builder *Builder) LightBlackB() *Builder {
+    return builder.With(LightBlackB)
+}
+
+// LightRedB is a syntax for LightRedB.
+func (builder *Builder) LightRedB() *Builder {
+    return builder.With(LightRedB)
+}
+
+// LightGreenB is a syntax for LightGreenB.
+func (builder *Builder) LightGreenB() *Builder {
+    return builder.With(LightGreenB)
+}
+
+// LightYellowB is a syntax for LightYellowB.
+func (builder *Builder) LightYellowB() *Builder {
+    return builder.With(LightYellowB)
+}
+
+// LightBlueB is a syntax for LightBlueB.
+func (builder *Builder) LightBlueB() *Builder {
+    return builder.With(LightBlueB)
+}
+
+// LightMagentaB is a syntax for LightMagentaB.
+func (builder *Builder) LightMagentaB() *Builder {
+    return builder.With(LightMagentaB)
+}
+
+// LightCyanB is a syntax for LightCyanB.
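+//
+// For example, aec.EmptyBuilder.LightCyanB().BlackF().ANSI builds an
+// ANSI value for black text on a light-cyan background (illustrative).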
+func (builder *Builder) LightCyanB() *Builder {
+    return builder.With(LightCyanB)
+}
+
+// LightWhiteB is a syntax for LightWhiteB.
+func (builder *Builder) LightWhiteB() *Builder {
+    return builder.With(LightWhiteB)
+}
+
+// Color3BitF is a syntax for Color3BitF.
+func (builder *Builder) Color3BitF(c RGB3Bit) *Builder {
+    return builder.With(Color3BitF(c))
+}
+
+// Color3BitB is a syntax for Color3BitB.
+func (builder *Builder) Color3BitB(c RGB3Bit) *Builder {
+    return builder.With(Color3BitB(c))
+}
+
+// Color8BitF is a syntax for Color8BitF.
+func (builder *Builder) Color8BitF(c RGB8Bit) *Builder {
+    return builder.With(Color8BitF(c))
+}
+
+// Color8BitB is a syntax for Color8BitB.
+func (builder *Builder) Color8BitB(c RGB8Bit) *Builder {
+    return builder.With(Color8BitB(c))
+}
+
+// FullColorF is a syntax for FullColorF.
+func (builder *Builder) FullColorF(r, g, b uint8) *Builder {
+    return builder.With(FullColorF(r, g, b))
+}
+
+// FullColorB is a syntax for FullColorB.
+func (builder *Builder) FullColorB(r, g, b uint8) *Builder {
+    return builder.With(FullColorB(r, g, b))
+}
+
+// RGB3BitF is a syntax for Color3BitF with NewRGB3Bit.
+func (builder *Builder) RGB3BitF(r, g, b uint8) *Builder {
+    return builder.Color3BitF(NewRGB3Bit(r, g, b))
+}
+
+// RGB3BitB is a syntax for Color3BitB with NewRGB3Bit.
+func (builder *Builder) RGB3BitB(r, g, b uint8) *Builder {
+    return builder.Color3BitB(NewRGB3Bit(r, g, b))
+}
+
+// RGB8BitF is a syntax for Color8BitF with NewRGB8Bit.
+func (builder *Builder) RGB8BitF(r, g, b uint8) *Builder {
+    return builder.Color8BitF(NewRGB8Bit(r, g, b))
+}
+
+// RGB8BitB is a syntax for Color8BitB with NewRGB8Bit.
+func (builder *Builder) RGB8BitB(r, g, b uint8) *Builder {
+    return builder.Color8BitB(NewRGB8Bit(r, g, b))
+}
+
+func init() {
+    EmptyBuilder = &Builder{empty}
+}
diff --git a/vendor/github.com/morikuni/aec/sample.gif b/vendor/github.com/morikuni/aec/sample.gif
new file mode 100644
index 0000000000..c6c613bb70
Binary files /dev/null and b/vendor/github.com/morikuni/aec/sample.gif differ
diff --git a/vendor/github.com/morikuni/aec/sgr.go b/vendor/github.com/morikuni/aec/sgr.go
new file mode 100644
index 0000000000..0ba3464e6d
--- /dev/null
+++ b/vendor/github.com/morikuni/aec/sgr.go
@@ -0,0 +1,202 @@
+package aec
+
+import (
+    "fmt"
+)
+
+// RGB3Bit is a 3-bit RGB color.
+type RGB3Bit uint8
+
+// RGB8Bit is an 8-bit RGB color.
+type RGB8Bit uint8
+
+func newSGR(n uint) ANSI {
+    return newAnsi(fmt.Sprintf(esc+"%dm", n))
+}
+
+// NewRGB3Bit creates a RGB3Bit from the given RGB.
+func NewRGB3Bit(r, g, b uint8) RGB3Bit {
+    return RGB3Bit((r >> 7) | ((g >> 6) & 0x2) | ((b >> 5) & 0x4))
+}
+
+// NewRGB8Bit creates a RGB8Bit from the given RGB.
+func NewRGB8Bit(r, g, b uint8) RGB8Bit {
+    return RGB8Bit(16 + 36*(r/43) + 6*(g/43) + b/43)
+}
+
+// Color3BitF sets the foreground color of text.
+func Color3BitF(c RGB3Bit) ANSI {
+    return newAnsi(fmt.Sprintf(esc+"%dm", c+30))
+}
+
+// Color3BitB sets the background color of text.
+func Color3BitB(c RGB3Bit) ANSI {
+    return newAnsi(fmt.Sprintf(esc+"%dm", c+40))
+}
+
+// Color8BitF sets the foreground color of text.
+func Color8BitF(c RGB8Bit) ANSI {
+    return newAnsi(fmt.Sprintf(esc+"38;5;%dm", c))
+}
+
+// Color8BitB sets the background color of text.
+func Color8BitB(c RGB8Bit) ANSI {
+    return newAnsi(fmt.Sprintf(esc+"48;5;%dm", c))
+}
+
+// FullColorF sets the foreground color of text.
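+//
+// For example, FullColorF(255, 128, 0) produces the 24-bit escape
+// sequence "\x1b[38;2;255;128;0m" (an orange foreground).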
+func FullColorF(r, g, b uint8) ANSI {
+    return newAnsi(fmt.Sprintf(esc+"38;2;%d;%d;%dm", r, g, b))
+}
+
+// FullColorB sets the background color of text.
+func FullColorB(r, g, b uint8) ANSI {
+    return newAnsi(fmt.Sprintf(esc+"48;2;%d;%d;%dm", r, g, b))
+}
+
+// Style
+var (
+    // Bold sets the text style to bold or increased intensity.
+    Bold ANSI
+
+    // Faint sets the text style to faint.
+    Faint ANSI
+
+    // Italic sets the text style to italic.
+    Italic ANSI
+
+    // Underline sets the text style to underline.
+    Underline ANSI
+
+    // BlinkSlow sets the text style to slow blink.
+    BlinkSlow ANSI
+
+    // BlinkRapid sets the text style to rapid blink.
+    BlinkRapid ANSI
+
+    // Inverse swaps the foreground color and background color.
+    Inverse ANSI
+
+    // Conceal sets the text style to conceal.
+    Conceal ANSI
+
+    // CrossOut sets the text style to crossed out.
+    CrossOut ANSI
+
+    // Frame sets the text style to framed.
+    Frame ANSI
+
+    // Encircle sets the text style to encircled.
+    Encircle ANSI
+
+    // Overline sets the text style to overlined.
+    Overline ANSI
+)
+
+// Foreground color of text.
+var (
+    // DefaultF is the default color of foreground.
+    DefaultF ANSI
+
+    // Normal color
+    BlackF   ANSI
+    RedF     ANSI
+    GreenF   ANSI
+    YellowF  ANSI
+    BlueF    ANSI
+    MagentaF ANSI
+    CyanF    ANSI
+    WhiteF   ANSI
+
+    // Light color
+    LightBlackF   ANSI
+    LightRedF     ANSI
+    LightGreenF   ANSI
+    LightYellowF  ANSI
+    LightBlueF    ANSI
+    LightMagentaF ANSI
+    LightCyanF    ANSI
+    LightWhiteF   ANSI
+)
+
+// Background color of text.
+var (
+    // DefaultB is the default color of background.
+    DefaultB ANSI
+
+    // Normal color
+    BlackB   ANSI
+    RedB     ANSI
+    GreenB   ANSI
+    YellowB  ANSI
+    BlueB    ANSI
+    MagentaB ANSI
+    CyanB    ANSI
+    WhiteB   ANSI
+
+    // Light color
+    LightBlackB   ANSI
+    LightRedB     ANSI
+    LightGreenB   ANSI
+    LightYellowB  ANSI
+    LightBlueB    ANSI
+    LightMagentaB ANSI
+    LightCyanB    ANSI
+    LightWhiteB   ANSI
+)
+
+func init() {
+    Bold = newSGR(1)
+    Faint = newSGR(2)
+    Italic = newSGR(3)
+    Underline = newSGR(4)
+    BlinkSlow = newSGR(5)
+    BlinkRapid = newSGR(6)
+    Inverse = newSGR(7)
+    Conceal = newSGR(8)
+    CrossOut = newSGR(9)
+
+    BlackF = newSGR(30)
+    RedF = newSGR(31)
+    GreenF = newSGR(32)
+    YellowF = newSGR(33)
+    BlueF = newSGR(34)
+    MagentaF = newSGR(35)
+    CyanF = newSGR(36)
+    WhiteF = newSGR(37)
+
+    DefaultF = newSGR(39)
+
+    BlackB = newSGR(40)
+    RedB = newSGR(41)
+    GreenB = newSGR(42)
+    YellowB = newSGR(43)
+    BlueB = newSGR(44)
+    MagentaB = newSGR(45)
+    CyanB = newSGR(46)
+    WhiteB = newSGR(47)
+
+    DefaultB = newSGR(49)
+
+    Frame = newSGR(51)
+    Encircle = newSGR(52)
+    Overline = newSGR(53)
+
+    LightBlackF = newSGR(90)
+    LightRedF = newSGR(91)
+    LightGreenF = newSGR(92)
+    LightYellowF = newSGR(93)
+    LightBlueF = newSGR(94)
+    LightMagentaF = newSGR(95)
+    LightCyanF = newSGR(96)
+    LightWhiteF = newSGR(97)
+
+    LightBlackB = newSGR(100)
+    LightRedB = newSGR(101)
+    LightGreenB = newSGR(102)
+    LightYellowB = newSGR(103)
+    LightBlueB = newSGR(104)
+    LightMagentaB = newSGR(105)
+    LightCyanB = newSGR(106)
+    LightWhiteB = newSGR(107)
+}
diff --git a/vendor/github.com/opencontainers/go-digest/.mailmap b/vendor/github.com/opencontainers/go-digest/.mailmap
new file mode 100644
index 0000000000..eaf8b2f9e6
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/.mailmap
@@ -0,0 +1,4 @@
+Aaron Lehmann
+Derek McGowan
+Stephen J Day
+Haibing Zhou
diff --git a/vendor/github.com/opencontainers/go-digest/.pullapprove.yml b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml
new file mode 100644
index
0000000000..b6165f83ca --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.pullapprove.yml @@ -0,0 +1,28 @@ +version: 2 + +requirements: + signed_off_by: + required: true + +always_pending: + title_regex: '^WIP' + explanation: 'Work in progress...' + +group_defaults: + required: 2 + approve_by_comment: + enabled: true + approve_regex: '^LGTM' + reject_regex: '^Rejected' + reset_on_push: + enabled: true + author_approval: + ignored: true + conditions: + branches: + - master + +groups: + go-digest: + teams: + - go-digest-maintainers diff --git a/vendor/github.com/opencontainers/go-digest/.travis.yml b/vendor/github.com/opencontainers/go-digest/.travis.yml new file mode 100644 index 0000000000..5775f885c1 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/.travis.yml @@ -0,0 +1,5 @@ +language: go +go: + - 1.12.x + - 1.13.x + - master diff --git a/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md new file mode 100644 index 0000000000..e4d962ac16 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/CONTRIBUTING.md @@ -0,0 +1,72 @@ +# Contributing to Docker open source projects + +Want to hack on this project? Awesome! Here are instructions to get you started. + +This project is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +For an in-depth description of our contribution process, visit the +contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +1 Letterman Drive +Suite D4700 +San Francisco, CA, 94129 + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. 
+ + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE b/vendor/github.com/opencontainers/go-digest/LICENSE new file mode 100644 index 0000000000..3ac8ab6487 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE @@ -0,0 +1,192 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2019, 2020 OCI Contributors + Copyright 2016 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/go-digest/LICENSE.docs b/vendor/github.com/opencontainers/go-digest/LICENSE.docs new file mode 100644 index 0000000000..e26cd4fc8e --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. 
The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/opencontainers/go-digest/MAINTAINERS b/vendor/github.com/opencontainers/go-digest/MAINTAINERS new file mode 100644 index 0000000000..843b1b2061 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/MAINTAINERS @@ -0,0 +1,5 @@ +Derek McGowan (@dmcgowan) +Stephen Day (@stevvooe) +Vincent Batts (@vbatts) +Akihiro Suda (@AkihiroSuda) +Sebastiaan van Stijn (@thaJeztah) diff --git a/vendor/github.com/opencontainers/go-digest/README.md b/vendor/github.com/opencontainers/go-digest/README.md new file mode 100644 index 0000000000..a11287207e --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/README.md @@ -0,0 +1,96 @@ +# go-digest + +[![GoDoc](https://godoc.org/github.com/opencontainers/go-digest?status.svg)](https://godoc.org/github.com/opencontainers/go-digest) [![Go Report Card](https://goreportcard.com/badge/github.com/opencontainers/go-digest)](https://goreportcard.com/report/github.com/opencontainers/go-digest) [![Build Status](https://travis-ci.org/opencontainers/go-digest.svg?branch=master)](https://travis-ci.org/opencontainers/go-digest) + +Common digest package used across the container ecosystem. + +Please see the [godoc](https://godoc.org/github.com/opencontainers/go-digest) for more information. + +# What is a digest? + +A digest is just a [hash](https://en.wikipedia.org/wiki/Hash_function). + +The most common use case for a digest is to create a content identifier for use in [Content Addressable Storage](https://en.wikipedia.org/wiki/Content-addressable_storage) systems: + +```go +id := digest.FromBytes([]byte("my content")) +``` + +In the example above, the id can be used to uniquely identify the byte slice "my content". +This allows two disparate applications to agree on a verifiable identifier without having to trust one another. + +An identifying digest can be verified, as follows: + +```go +if id != digest.FromBytes([]byte("my content")) { + return errors.New("the content has changed!") +} +``` + +A `Verifier` type can be used to handle cases where an `io.Reader` makes more sense: + +```go +rd := getContent() +verifier := id.Verifier() +io.Copy(verifier, rd) + +if !verifier.Verified() { + return errors.New("the content has changed!") +} +``` + +Using [Merkle DAGs](https://en.wikipedia.org/wiki/Merkle_tree), this can power a rich, safe, content distribution system. + +# Usage + +While the [godoc](https://godoc.org/github.com/opencontainers/go-digest) is considered the best resource, a few important items need to be called out when using this package. + +1. Make sure to import the hash implementations into your application or the package will panic. 
+   You should have something like the following in the main (or other entrypoint) of your application:
+
+   ```go
+   import (
+       _ "crypto/sha256"
+       _ "crypto/sha512"
+   )
+   ```
+   This may seem inconvenient but it allows you to replace the hash
+   implementations with others, such as https://github.com/stevvooe/resumable.
+
+2. Even though `digest.Digest` can be assembled directly from a string, _always_ verify your input with `digest.Parse` or use `Digest.Validate` when accepting untrusted input.
+   While there are measures to avoid common problems, this will ensure you have valid digests in the rest of your application.
+
+3. While alternative encodings of hash values (digests) are possible (for example, base64), this package deals exclusively with hex-encoded digests.
+
+# Stability
+
+The Go API, at this stage, is considered stable, unless otherwise noted.
+
+As always, before using a package export, read the [godoc](https://godoc.org/github.com/opencontainers/go-digest).
+
+# Contributing
+
+This package is considered fairly complete.
+It has been in production in thousands (millions?) of deployments and is fairly battle-hardened.
+New additions will be met with skepticism.
+If you think there is a missing feature, please file a bug clearly describing the problem and the alternatives you tried before submitting a PR.
+
+## Code of Conduct
+
+Participation in the OpenContainers community is governed by [OpenContainer's Code of Conduct][code-of-conduct].
+
+## Security
+
+If you find an issue, please follow the [security][security] protocol to report it.
+
+# Copyright and license
+
+Copyright © 2019, 2020 OCI Contributors
+Copyright © 2016 Docker, Inc.
+All rights reserved, except as follows.
+Code is released under the [Apache 2.0 license](LICENSE).
+This `README.md` file and the [`CONTRIBUTING.md`](CONTRIBUTING.md) file are licensed under the Creative Commons Attribution 4.0 International License under the terms and conditions set forth in the file [`LICENSE.docs`](LICENSE.docs).
+You may obtain a duplicate copy of the same license, titled CC BY-SA 4.0, at http://creativecommons.org/licenses/by-sa/4.0/.
+
+[security]: https://github.com/opencontainers/org/blob/master/security
+[code-of-conduct]: https://github.com/opencontainers/org/blob/master/CODE_OF_CONDUCT.md
diff --git a/vendor/github.com/opencontainers/go-digest/algorithm.go b/vendor/github.com/opencontainers/go-digest/algorithm.go
new file mode 100644
index 0000000000..490951dc3f
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/algorithm.go
@@ -0,0 +1,193 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+    "crypto"
+    "fmt"
+    "hash"
+    "io"
+    "regexp"
+)
+
+// Algorithm identifies an implementation of a digester by an identifier.
+// Note that this defines both the hash algorithm used and the string
+// encoding.
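+//
+// For example, SHA256.FromString("hello, world") produces a Digest of
+// the form "sha256:" followed by 64 lower-case hex characters
+// (an illustrative sketch using only names defined in this package).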
+type Algorithm string
+
+// supported digest types
+const (
+    SHA256 Algorithm = "sha256" // sha256 with hex encoding (lower case only)
+    SHA384 Algorithm = "sha384" // sha384 with hex encoding (lower case only)
+    SHA512 Algorithm = "sha512" // sha512 with hex encoding (lower case only)
+
+    // Canonical is the primary digest algorithm used with the distribution
+    // project. Other digests may be used but this one is the primary storage
+    // digest.
+    Canonical = SHA256
+)
+
+var (
+    // TODO(stevvooe): Follow the pattern of the standard crypto package for
+    // registration of digests. Effectively, we are a registerable set and
+    // common symbol access.
+
+    // algorithms maps values to hash.Hash implementations. Other algorithms
+    // may be available but they cannot be calculated by the digest package.
+    algorithms = map[Algorithm]crypto.Hash{
+        SHA256: crypto.SHA256,
+        SHA384: crypto.SHA384,
+        SHA512: crypto.SHA512,
+    }
+
+    // anchoredEncodedRegexps contains anchored regular expressions for hex-encoded digests.
+    // Note that upper-case A-F is disallowed.
+    anchoredEncodedRegexps = map[Algorithm]*regexp.Regexp{
+        SHA256: regexp.MustCompile(`^[a-f0-9]{64}$`),
+        SHA384: regexp.MustCompile(`^[a-f0-9]{96}$`),
+        SHA512: regexp.MustCompile(`^[a-f0-9]{128}$`),
+    }
+)
+
+// Available returns true if the digest type is available for use. If this
+// returns false, Digester and Hash will return nil.
+func (a Algorithm) Available() bool {
+    h, ok := algorithms[a]
+    if !ok {
+        return false
+    }
+
+    // check availability of the hash, as well
+    return h.Available()
+}
+
+func (a Algorithm) String() string {
+    return string(a)
+}
+
+// Size returns the number of bytes returned by the hash.
+func (a Algorithm) Size() int {
+    h, ok := algorithms[a]
+    if !ok {
+        return 0
+    }
+    return h.Size()
+}
+
+// Set is implemented to allow use of Algorithm as a command line flag.
+func (a *Algorithm) Set(value string) error {
+    if value == "" {
+        *a = Canonical
+    } else {
+        // just do a type conversion, support is queried with Available.
+        *a = Algorithm(value)
+    }
+
+    if !a.Available() {
+        return ErrDigestUnsupported
+    }
+
+    return nil
+}
+
+// Digester returns a new digester for the specified algorithm. If the algorithm
+// does not have a digester implementation, nil will be returned. This can be
+// checked by calling Available before calling Digester.
+func (a Algorithm) Digester() Digester {
+    return &digester{
+        alg:  a,
+        hash: a.Hash(),
+    }
+}
+
+// Hash returns a new hash as used by the algorithm. If not available, the
+// method will panic. Check Algorithm.Available() before calling.
+func (a Algorithm) Hash() hash.Hash {
+    if !a.Available() {
+        // Empty algorithm string is invalid
+        if a == "" {
+            panic(fmt.Sprintf("empty digest algorithm, validate before calling Algorithm.Hash()"))
+        }
+
+        // NOTE(stevvooe): A missing hash is usually a programming error that
+        // must be resolved at compile time. We don't import in the digest
+        // package to allow users to choose their hash implementation (such as
+        // when using stevvooe/resumable or a hardware accelerated package).
+        //
+        // Applications that may want to resolve the hash at runtime should
+        // call Algorithm.Available before calling Algorithm.Hash().
+        panic(fmt.Sprintf("%v not available (make sure it is imported)", a))
+    }
+
+    return algorithms[a].New()
+}
+
+// Encode encodes the raw bytes of a digest, typically from a hash.Hash, into
+// the encoded portion of the digest.
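+//
+// For example, SHA256.Encode([]byte{0xde, 0xad}) returns "dead", since
+// all supported algorithms currently use lower-case hex encoding.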
+func (a Algorithm) Encode(d []byte) string {
+	// TODO(stevvooe): Currently, all algorithms use a hex encoding. When we
+	// add support for back registration, we can modify this accordingly.
+	return fmt.Sprintf("%x", d)
+}
+
+// FromReader returns the digest of the reader using the algorithm.
+func (a Algorithm) FromReader(rd io.Reader) (Digest, error) {
+	digester := a.Digester()
+
+	if _, err := io.Copy(digester.Hash(), rd); err != nil {
+		return "", err
+	}
+
+	return digester.Digest(), nil
+}
+
+// FromBytes digests the input and returns a Digest.
+func (a Algorithm) FromBytes(p []byte) Digest {
+	digester := a.Digester()
+
+	if _, err := digester.Hash().Write(p); err != nil {
+		// Writes to a Hash should never fail. None of the existing
+		// hash implementations in the stdlib or hashes vendored
+		// here can return errors from Write. Having a panic in this
+		// condition instead of having FromBytes return an error value
+		// avoids unnecessary error handling paths in all callers.
+		panic("write to hash function returned error: " + err.Error())
+	}
+
+	return digester.Digest()
+}
+
+// FromString digests the string input and returns a Digest.
+func (a Algorithm) FromString(s string) Digest {
+	return a.FromBytes([]byte(s))
+}
+
+// Validate validates the encoded portion string.
+func (a Algorithm) Validate(encoded string) error {
+	r, ok := anchoredEncodedRegexps[a]
+	if !ok {
+		return ErrDigestUnsupported
+	}
+	// Digests must always be hex-encoded, ensuring that their hex portion will
+	// always be size*2.
+	if a.Size()*2 != len(encoded) {
+		return ErrDigestInvalidLength
+	}
+	if r.MatchString(encoded) {
+		return nil
+	}
+	return ErrDigestInvalidFormat
+}
diff --git a/vendor/github.com/opencontainers/go-digest/digest.go b/vendor/github.com/opencontainers/go-digest/digest.go
new file mode 100644
index 0000000000..518b5e7154
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/digest.go
@@ -0,0 +1,157 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"fmt"
+	"hash"
+	"io"
+	"regexp"
+	"strings"
+)
+
+// Digest allows simple protection of hex-formatted digest strings, prefixed
+// by their algorithm. Strings of type Digest have some guarantee of being in
+// the correct format and it provides quick access to the components of a
+// digest string.
+//
+// The following is an example of the contents of Digest types:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// This allows us to abstract the digest behind this type and work only in
+// those terms.
+type Digest string
+
+// NewDigest returns a Digest from alg and a hash.Hash object.
+func NewDigest(alg Algorithm, h hash.Hash) Digest {
+	return NewDigestFromBytes(alg, h.Sum(nil))
+}
+
+// NewDigestFromBytes returns a new digest from the byte contents of p.
+// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...)
+// functions.
This is also useful for rebuilding digests from binary +// serializations. +func NewDigestFromBytes(alg Algorithm, p []byte) Digest { + return NewDigestFromEncoded(alg, alg.Encode(p)) +} + +// NewDigestFromHex is deprecated. Please use NewDigestFromEncoded. +func NewDigestFromHex(alg, hex string) Digest { + return NewDigestFromEncoded(Algorithm(alg), hex) +} + +// NewDigestFromEncoded returns a Digest from alg and the encoded digest. +func NewDigestFromEncoded(alg Algorithm, encoded string) Digest { + return Digest(fmt.Sprintf("%s:%s", alg, encoded)) +} + +// DigestRegexp matches valid digest types. +var DigestRegexp = regexp.MustCompile(`[a-z0-9]+(?:[.+_-][a-z0-9]+)*:[a-zA-Z0-9=_-]+`) + +// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. +var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) + +var ( + // ErrDigestInvalidFormat returned when digest format invalid. + ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") + + // ErrDigestInvalidLength returned when digest has invalid length. + ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") + + // ErrDigestUnsupported returned when the digest algorithm is unsupported. + ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") +) + +// Parse parses s and returns the validated digest object. An error will +// be returned if the format is invalid. +func Parse(s string) (Digest, error) { + d := Digest(s) + return d, d.Validate() +} + +// FromReader consumes the content of rd until io.EOF, returning canonical digest. +func FromReader(rd io.Reader) (Digest, error) { + return Canonical.FromReader(rd) +} + +// FromBytes digests the input and returns a Digest. +func FromBytes(p []byte) Digest { + return Canonical.FromBytes(p) +} + +// FromString digests the input and returns a Digest. +func FromString(s string) Digest { + return Canonical.FromString(s) +} + +// Validate checks that the contents of d is a valid digest, returning an +// error if not. +func (d Digest) Validate() error { + s := string(d) + i := strings.Index(s, ":") + if i <= 0 || i+1 == len(s) { + return ErrDigestInvalidFormat + } + algorithm, encoded := Algorithm(s[:i]), s[i+1:] + if !algorithm.Available() { + if !DigestRegexpAnchored.MatchString(s) { + return ErrDigestInvalidFormat + } + return ErrDigestUnsupported + } + return algorithm.Validate(encoded) +} + +// Algorithm returns the algorithm portion of the digest. This will panic if +// the underlying digest is not in a valid format. +func (d Digest) Algorithm() Algorithm { + return Algorithm(d[:d.sepIndex()]) +} + +// Verifier returns a writer object that can be used to verify a stream of +// content against the digest. If the digest is invalid, the method will panic. +func (d Digest) Verifier() Verifier { + return hashVerifier{ + hash: d.Algorithm().Hash(), + digest: d, + } +} + +// Encoded returns the encoded portion of the digest. This will panic if the +// underlying digest is not in a valid format. +func (d Digest) Encoded() string { + return string(d[d.sepIndex()+1:]) +} + +// Hex is deprecated. Please use Digest.Encoded. 
+func (d Digest) Hex() string { + return d.Encoded() +} + +func (d Digest) String() string { + return string(d) +} + +func (d Digest) sepIndex() int { + i := strings.Index(string(d), ":") + + if i < 0 { + panic(fmt.Sprintf("no ':' separator in digest %q", d)) + } + + return i +} diff --git a/vendor/github.com/opencontainers/go-digest/digester.go b/vendor/github.com/opencontainers/go-digest/digester.go new file mode 100644 index 0000000000..ede9077571 --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/digester.go @@ -0,0 +1,40 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package digest + +import "hash" + +// Digester calculates the digest of written data. Writes should go directly +// to the return value of Hash, while calling Digest will return the current +// value of the digest. +type Digester interface { + Hash() hash.Hash // provides direct access to underlying hash instance. + Digest() Digest +} + +// digester provides a simple digester definition that embeds a hasher. +type digester struct { + alg Algorithm + hash hash.Hash +} + +func (d *digester) Hash() hash.Hash { + return d.hash +} + +func (d *digester) Digest() Digest { + return NewDigest(d.alg, d.hash) +} diff --git a/vendor/github.com/opencontainers/go-digest/doc.go b/vendor/github.com/opencontainers/go-digest/doc.go new file mode 100644 index 0000000000..83d3a936ca --- /dev/null +++ b/vendor/github.com/opencontainers/go-digest/doc.go @@ -0,0 +1,62 @@ +// Copyright 2019, 2020 OCI Contributors +// Copyright 2017 Docker, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package digest provides a generalized type to opaquely represent message +// digests and their operations within the registry. The Digest type is +// designed to serve as a flexible identifier in a content-addressable system. +// More importantly, it provides tools and wrappers to work with +// hash.Hash-based digests with little effort. 
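+//
+// A minimal usage sketch, assuming the caller has blank-imported
+// crypto/sha256 so the canonical algorithm is available:
+//
+//	id := digest.FromString("hello world")
+//	fmt.Println(id.Algorithm()) // sha256
+//	fmt.Println(id.Encoded())   // b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9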
+//
+// Basics
+//
+// The format of a digest is simply a string with two parts, dubbed the
+// "algorithm" and the "digest", separated by a colon:
+//
+//	<algorithm>:<digest>
+//
+// An example of a sha256 digest representation follows:
+//
+//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
+//
+// The "algorithm" portion defines both the hashing algorithm used to calculate
+// the digest and the encoding of the resulting digest, which defaults to "hex"
+// if not otherwise specified. Currently, all supported algorithms have their
+// digests encoded in hex strings.
+//
+// In the example above, the string "sha256" is the algorithm and the hex bytes
+// are the "digest".
+//
+// Because the Digest type is simply a string, once a valid Digest is
+// obtained, comparisons are cheap, quick and simple to express with the
+// standard equality operator.
+//
+// Verification
+//
+// The main benefit of using the Digest type is simple verification against a
+// given digest. The Verifier interface, modeled after the stdlib hash.Hash
+// interface, provides a common write sink for digest verification. After
+// writing is complete, calling the Verifier.Verified method will indicate
+// whether or not the stream of bytes matches the target digest.
+//
+// Missing Features
+//
+// In addition to the above, we intend to add the following features to this
+// package:
+//
+// 1. A Digester type that supports write sink digest calculation.
+//
+// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
+//
+package digest
diff --git a/vendor/github.com/opencontainers/go-digest/verifiers.go b/vendor/github.com/opencontainers/go-digest/verifiers.go
new file mode 100644
index 0000000000..afef506f46
--- /dev/null
+++ b/vendor/github.com/opencontainers/go-digest/verifiers.go
@@ -0,0 +1,46 @@
+// Copyright 2019, 2020 OCI Contributors
+// Copyright 2017 Docker, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package digest
+
+import (
+	"hash"
+	"io"
+)
+
+// Verifier presents a general verification interface to be used with message
+// digests and other byte stream verifications. Users instantiate a Verifier
+// from one of the various methods, write the data under test to it then check
+// the result with the Verified method.
+type Verifier interface {
+	io.Writer
+
+	// Verified will return true if the content written to Verifier matches
+	// the digest.
+ Verified() bool +} + +type hashVerifier struct { + digest Digest + hash hash.Hash +} + +func (hv hashVerifier) Write(p []byte) (n int, err error) { + return hv.hash.Write(p) +} + +func (hv hashVerifier) Verified() bool { + return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) +} diff --git a/vendor/github.com/opencontainers/image-spec/LICENSE b/vendor/github.com/opencontainers/image-spec/LICENSE new file mode 100644 index 0000000000..9fdc20fdb6 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2016 The Linux Foundation. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go new file mode 100644 index 0000000000..35d8108958 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -0,0 +1,56 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +const ( + // AnnotationCreated is the annotation key for the date and time on which the image was built (date-time string as defined by RFC 3339). + AnnotationCreated = "org.opencontainers.image.created" + + // AnnotationAuthors is the annotation key for the contact details of the people or organization responsible for the image (freeform string). + AnnotationAuthors = "org.opencontainers.image.authors" + + // AnnotationURL is the annotation key for the URL to find more information on the image. + AnnotationURL = "org.opencontainers.image.url" + + // AnnotationDocumentation is the annotation key for the URL to get documentation on the image. + AnnotationDocumentation = "org.opencontainers.image.documentation" + + // AnnotationSource is the annotation key for the URL to get source code for building the image. + AnnotationSource = "org.opencontainers.image.source" + + // AnnotationVersion is the annotation key for the version of the packaged software. + // The version MAY match a label or tag in the source code repository. + // The version MAY be Semantic versioning-compatible. + AnnotationVersion = "org.opencontainers.image.version" + + // AnnotationRevision is the annotation key for the source control revision identifier for the packaged software. + AnnotationRevision = "org.opencontainers.image.revision" + + // AnnotationVendor is the annotation key for the name of the distributing entity, organization or individual. + AnnotationVendor = "org.opencontainers.image.vendor" + + // AnnotationLicenses is the annotation key for the license(s) under which contained software is distributed as an SPDX License Expression. + AnnotationLicenses = "org.opencontainers.image.licenses" + + // AnnotationRefName is the annotation key for the name of the reference for a target. + // SHOULD only be considered valid when on descriptors on `index.json` within image layout. + AnnotationRefName = "org.opencontainers.image.ref.name" + + // AnnotationTitle is the annotation key for the human-readable title of the image. 
+	AnnotationTitle = "org.opencontainers.image.title"
+
+	// AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image.
+	AnnotationDescription = "org.opencontainers.image.description"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
new file mode 100644
index 0000000000..fe799bd698
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go
@@ -0,0 +1,103 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import (
+	"time"
+
+	digest "github.com/opencontainers/go-digest"
+)
+
+// ImageConfig defines the execution parameters which should be used as a base when running a container using an image.
+type ImageConfig struct {
+	// User defines the username or UID which the process in the container should run as.
+	User string `json:"User,omitempty"`
+
+	// ExposedPorts is a set of ports to expose from a container running this image.
+	ExposedPorts map[string]struct{} `json:"ExposedPorts,omitempty"`
+
+	// Env is a list of environment variables to be used in a container.
+	Env []string `json:"Env,omitempty"`
+
+	// Entrypoint defines a list of arguments to use as the command to execute when the container starts.
+	Entrypoint []string `json:"Entrypoint,omitempty"`
+
+	// Cmd defines the default arguments to the entrypoint of the container.
+	Cmd []string `json:"Cmd,omitempty"`
+
+	// Volumes is a set of directories describing where the process is likely to write data specific to a container instance.
+	Volumes map[string]struct{} `json:"Volumes,omitempty"`
+
+	// WorkingDir sets the current working directory of the entrypoint process in the container.
+	WorkingDir string `json:"WorkingDir,omitempty"`
+
+	// Labels contains arbitrary metadata for the container.
+	Labels map[string]string `json:"Labels,omitempty"`
+
+	// StopSignal contains the system call signal that will be sent to the container to exit.
+	StopSignal string `json:"StopSignal,omitempty"`
+}
+
+// RootFS describes a layer's content addresses.
+type RootFS struct {
+	// Type is the type of the rootfs.
+	Type string `json:"type"`
+
+	// DiffIDs is an array of layer content hashes (DiffIDs), in order from bottom-most to top-most.
+	DiffIDs []digest.Digest `json:"diff_ids"`
+}
+
+// History describes the history of a layer.
+type History struct {
+	// Created is the combined date and time at which the layer was created, formatted as defined by RFC 3339, section 5.6.
+	Created *time.Time `json:"created,omitempty"`
+
+	// CreatedBy is the command which created the layer.
+	CreatedBy string `json:"created_by,omitempty"`
+
+	// Author is the author of the build point.
+	Author string `json:"author,omitempty"`
+
+	// Comment is a custom message set when creating the layer.
+ Comment string `json:"comment,omitempty"` + + // EmptyLayer is used to mark if the history item created a filesystem diff. + EmptyLayer bool `json:"empty_layer,omitempty"` +} + +// Image is the JSON structure which describes some basic information about the image. +// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. +type Image struct { + // Created is the combined date and time at which the image was created, formatted as defined by RFC 3339, section 5.6. + Created *time.Time `json:"created,omitempty"` + + // Author defines the name and/or email address of the person or entity which created and is responsible for maintaining the image. + Author string `json:"author,omitempty"` + + // Architecture is the CPU architecture which the binaries in this image are built to run on. + Architecture string `json:"architecture"` + + // OS is the name of the operating system which the image is built to run on. + OS string `json:"os"` + + // Config defines the execution parameters which should be used as a base when running a container using the image. + Config ImageConfig `json:"config,omitempty"` + + // RootFS references the layer content addresses used by the image. + RootFS RootFS `json:"rootfs"` + + // History describes the history of each layer. + History []History `json:"history,omitempty"` +} diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go new file mode 100644 index 0000000000..6e442a0853 --- /dev/null +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/descriptor.go @@ -0,0 +1,64 @@ +// Copyright 2016 The Linux Foundation +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package v1 + +import digest "github.com/opencontainers/go-digest" + +// Descriptor describes the disposition of targeted content. +// This structure provides `application/vnd.oci.descriptor.v1+json` mediatype +// when marshalled to JSON. +type Descriptor struct { + // MediaType is the media type of the object this schema refers to. + MediaType string `json:"mediaType,omitempty"` + + // Digest is the digest of the targeted content. + Digest digest.Digest `json:"digest"` + + // Size specifies the size in bytes of the blob. + Size int64 `json:"size"` + + // URLs specifies a list of URLs from which this object MAY be downloaded + URLs []string `json:"urls,omitempty"` + + // Annotations contains arbitrary metadata relating to the targeted content. + Annotations map[string]string `json:"annotations,omitempty"` + + // Platform describes the platform which the image in the manifest runs on. + // + // This should only be used when referring to a manifest. + Platform *Platform `json:"platform,omitempty"` +} + +// Platform describes the platform which the image in the manifest runs on. +type Platform struct { + // Architecture field specifies the CPU architecture, for example + // `amd64` or `ppc64`. 
+	Architecture string `json:"architecture"`
+
+	// OS specifies the operating system, for example `linux` or `windows`.
+	OS string `json:"os"`
+
+	// OSVersion is an optional field specifying the operating system
+	// version, for example on Windows `10.0.14393.1066`.
+	OSVersion string `json:"os.version,omitempty"`
+
+	// OSFeatures is an optional field specifying an array of strings,
+	// each listing a required OS feature (for example on Windows `win32k`).
+	OSFeatures []string `json:"os.features,omitempty"`
+
+	// Variant is an optional field specifying a variant of the CPU, for
+	// example `v7` to specify ARMv7 when architecture is `arm`.
+	Variant string `json:"variant,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
new file mode 100644
index 0000000000..82da6c6a89
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/index.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import "github.com/opencontainers/image-spec/specs-go"
+
+// Index references manifests for various platforms.
+// This structure provides `application/vnd.oci.image.index.v1+json` mediatype when marshalled to JSON.
+type Index struct {
+	specs.Versioned
+
+	// MediaType specifies the type of this document data structure, e.g. `application/vnd.oci.image.index.v1+json`.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Manifests references platform specific manifests.
+	Manifests []Descriptor `json:"manifests"`
+
+	// Annotations contains arbitrary metadata for the image index.
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go
new file mode 100644
index 0000000000..fc79e9e0d1
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/layout.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+const (
+	// ImageLayoutFile is the file name of the OCI image layout file.
+	ImageLayoutFile = "oci-layout"
+	// ImageLayoutVersion is the version of ImageLayout.
+	ImageLayoutVersion = "1.0.0"
+)
+
+// ImageLayout is the structure in the "oci-layout" file, found in the root
+// of an OCI Image-layout directory.
+type ImageLayout struct {
+	Version string `json:"imageLayoutVersion"`
+}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
new file mode 100644
index 0000000000..d72d15ce4b
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/manifest.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+import "github.com/opencontainers/image-spec/specs-go"
+
+// Manifest provides `application/vnd.oci.image.manifest.v1+json` mediatype structure when marshalled to JSON.
+type Manifest struct {
+	specs.Versioned
+
+	// MediaType specifies the type of this document data structure, e.g. `application/vnd.oci.image.manifest.v1+json`.
+	MediaType string `json:"mediaType,omitempty"`
+
+	// Config references a configuration object for a container, by digest.
+	// The referenced configuration object is a JSON blob that the runtime uses to set up the container.
+	Config Descriptor `json:"config"`
+
+	// Layers is an indexed list of layers referenced by the manifest.
+	Layers []Descriptor `json:"layers"`
+
+	// Annotations contains arbitrary metadata for the image manifest.
+	Annotations map[string]string `json:"annotations,omitempty"`
+}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
new file mode 100644
index 0000000000..bad7bb97f4
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go
@@ -0,0 +1,48 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package v1
+
+const (
+	// MediaTypeDescriptor specifies the media type for a content descriptor.
+	MediaTypeDescriptor = "application/vnd.oci.descriptor.v1+json"
+
+	// MediaTypeLayoutHeader specifies the media type for the oci-layout.
+	MediaTypeLayoutHeader = "application/vnd.oci.layout.header.v1+json"
+
+	// MediaTypeImageManifest specifies the media type for an image manifest.
+	MediaTypeImageManifest = "application/vnd.oci.image.manifest.v1+json"
+
+	// MediaTypeImageIndex specifies the media type for an image index.
+	MediaTypeImageIndex = "application/vnd.oci.image.index.v1+json"
+
+	// MediaTypeImageLayer is the media type used for layers referenced by the manifest.
+	MediaTypeImageLayer = "application/vnd.oci.image.layer.v1.tar"
+
+	// MediaTypeImageLayerGzip is the media type used for gzipped layers
+	// referenced by the manifest.
+	MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip"
+
+	// MediaTypeImageLayerNonDistributable is the media type for layers referenced by
+	// the manifest but with distribution restrictions.
+	MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar"
+
+	// MediaTypeImageLayerNonDistributableGzip is the media type for
+	// gzipped layers referenced by the manifest but with distribution
+	// restrictions.
+	MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip"
+
+	// MediaTypeImageConfig specifies the media type for the image configuration.
+	MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json"
+)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
new file mode 100644
index 0000000000..0d9543f160
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package specs
+
+import "fmt"
+
+const (
+	// VersionMajor is for API-incompatible changes.
+	VersionMajor = 1
+	// VersionMinor is for functionality added in a backwards-compatible manner.
+	VersionMinor = 0
+	// VersionPatch is for backwards-compatible bug fixes.
+	VersionPatch = 2
+
+	// VersionDev indicates a development branch. Releases will be an empty string.
+	VersionDev = ""
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
new file mode 100644
index 0000000000..58a1510f33
--- /dev/null
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/versioned.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Linux Foundation
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +package specs + +// Versioned provides a struct with the manifest schemaVersion and mediaType. +// Incoming content with unknown schema version can be decoded against this +// struct to check the version. +type Versioned struct { + // SchemaVersion is the image manifest schema that this image follows + SchemaVersion int `json:"schemaVersion"` +} diff --git a/vendor/github.com/rubenv/sql-migrate/.dockerignore b/vendor/github.com/rubenv/sql-migrate/.dockerignore new file mode 100644 index 0000000000..42bb921b46 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.dockerignore @@ -0,0 +1,6 @@ +* +!go.mod +!go.sum +!*.go +!sql-migrate/*.go +!sqlparse/*.go diff --git a/vendor/github.com/rubenv/sql-migrate/.gitignore b/vendor/github.com/rubenv/sql-migrate/.gitignore new file mode 100644 index 0000000000..b083cca4a8 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.gitignore @@ -0,0 +1,7 @@ +.*.swp +*.test +.idea +/vendor/ + +/sql-migrate/test.db +/test.db diff --git a/vendor/github.com/rubenv/sql-migrate/.travis.yml b/vendor/github.com/rubenv/sql-migrate/.travis.yml new file mode 100644 index 0000000000..64f8c15b2f --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/.travis.yml @@ -0,0 +1,33 @@ +language: go + +sudo: false + +go: + - "1.13" + - "1.14" + - "1.15" + - "1.16" + +services: + - mysql + - postgresql + +before_install: + - mysql -e "CREATE DATABASE IF NOT EXISTS test;" -uroot + - mysql -e "CREATE DATABASE IF NOT EXISTS test_env;" -uroot + - psql -c "CREATE DATABASE test;" -U postgres + +install: + - go get -t ./... + - go install ./... + - go get -u github.com/kisielk/errcheck + +script: + - CGO_ENABLED=0 go build -v . + - go test -v ./... + - bash test-integration/postgres.sh + - bash test-integration/mysql.sh + - bash test-integration/mysql-flag.sh + - bash test-integration/mysql-env.sh + - bash test-integration/sqlite.sh + - errcheck ./... diff --git a/vendor/github.com/rubenv/sql-migrate/Dockerfile b/vendor/github.com/rubenv/sql-migrate/Dockerfile new file mode 100644 index 0000000000..cfa00f7eaf --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/Dockerfile @@ -0,0 +1,25 @@ +ARG GO_VERSION=1.16.2 +ARG ALPINE_VERSION=3.12 + +### Vendor +FROM golang:${GO_VERSION} as vendor +COPY . /project +WORKDIR /project +RUN go mod tidy && go mod vendor + +### Build binary +FROM golang:${GO_VERSION} as build-binary +COPY . 
/project +COPY --from=vendor /project/vendor /project/vendor +WORKDIR /project +RUN GOOS=linux GOARCH=amd64 CGO_ENABLED=0 GO111MODULE=on go build \ + -v \ + -mod vendor \ + -o /project/bin/sql-migrate \ + /project/sql-migrate + +### Image +FROM alpine:${ALPINE_VERSION} as image +COPY --from=build-binary /project/bin/sql-migrate /usr/local/bin/sql-migrate +RUN chmod +x /usr/local/bin/sql-migrate +ENTRYPOINT ["sql-migrate"] diff --git a/vendor/github.com/rubenv/sql-migrate/LICENSE b/vendor/github.com/rubenv/sql-migrate/LICENSE new file mode 100644 index 0000000000..95e7c9abe7 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (C) 2014-2021 by Ruben Vermeersch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/rubenv/sql-migrate/README.md b/vendor/github.com/rubenv/sql-migrate/README.md new file mode 100644 index 0000000000..c954aa8956 --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/README.md @@ -0,0 +1,400 @@ +# sql-migrate + +> SQL Schema migration tool for [Go](https://golang.org/). Based on [gorp](https://github.com/go-gorp/gorp) and [goose](https://bitbucket.org/liamstask/goose). + +[![Build Status](https://travis-ci.org/rubenv/sql-migrate.svg?branch=master)](https://travis-ci.org/rubenv/sql-migrate) [![GoDoc](https://godoc.org/github.com/rubenv/sql-migrate?status.svg)](https://godoc.org/github.com/rubenv/sql-migrate) + +Using [modl](https://github.com/jmoiron/modl)? Check out [modl-migrate](https://github.com/rubenv/modl-migrate). + +## Features + +* Usable as a CLI tool or as a library +* Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through [gorp](https://github.com/go-gorp/gorp)) +* Can embed migrations into your application +* Migrations are defined with SQL for full flexibility +* Atomic migrations +* Up/down migrations to allow rollback +* Supports multiple database types in one project +* Works great with other libraries such as [sqlx](https://jmoiron.github.io/sqlx/) + +## Installation + +To install the library and command line program, use the following: + +```bash +go get -v github.com/rubenv/sql-migrate/... 
+```
+
+## Usage
+
+### As a standalone tool
+
+```
+$ sql-migrate --help
+usage: sql-migrate [--version] [--help] <command> [<args>]
+
+Available commands are:
+    down      Undo a database migration
+    new       Create a new migration
+    redo      Reapply the last migration
+    status    Show migration status
+    up        Migrates the database to the most recent version available
+```
+
+Each command requires a configuration file (which defaults to `dbconfig.yml`, but can be specified with the `-config` flag). This config file should specify one or more environments:
+
+```yml
+development:
+  dialect: sqlite3
+  datasource: test.db
+  dir: migrations/sqlite3
+
+production:
+  dialect: postgres
+  datasource: dbname=myapp sslmode=disable
+  dir: migrations/postgres
+  table: migrations
+```
+
+(See more examples for different setups [here](test-integration/dbconfig.yml))
+
+Environment variables in the `datasource` field are also expanded via an embedded `os.ExpandEnv` call.
+This may be useful if you don't want to store credentials in the file:
+
+```yml
+production:
+  dialect: postgres
+  datasource: host=prodhost dbname=proddb user=${DB_USER} password=${DB_PASSWORD} sslmode=required
+  dir: migrations
+  table: migrations
+```
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the `-env` flag (defaults to `development`).
+
+Use the `--help` flag in combination with any of the commands to get an overview of its usage:
+
+```
+$ sql-migrate up --help
+Usage: sql-migrate up [options] ...
+
+  Migrates the database to the most recent version available.
+
+Options:
+
+  -config=dbconfig.yml   Configuration file to use.
+  -env="development"     Environment.
+  -limit=0               Limit the number of migrations (0 = unlimited).
+  -dryrun                Don't apply migrations, just print them.
+```
+
+The `new` command creates a new empty migration template using the following pattern: `<current time>-<name>.sql`.
+
+The `up` command applies all available migrations. By contrast, `down` will only apply one migration by default. This behavior can be changed for both by using the `-limit` parameter.
+
+The `redo` command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
+
+Use the `status` command to see the state of the applied migrations:
+
+```bash
+$ sql-migrate status
++---------------+-----------------------------------------+
+|   MIGRATION   |                 APPLIED                 |
++---------------+-----------------------------------------+
+| 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
+| 2_record.sql  | no                                      |
++---------------+-----------------------------------------+
+```
+
+#### Running Test Integrations
+You can see how to run the integration tests for different database setups by executing the `.sh` files in [test-integration](test-integration/):
+
+```bash
+# Run mysql-env.sh example (you need to be in the project root directory)
+
+./test-integration/mysql-env.sh
+```
+
+### MySQL Caveat
+
+If you are using MySQL, you must append `?parseTime=true` to the `datasource` configuration. For example:
+
+```yml
+production:
+  dialect: mysql
+  datasource: root@/dbname?parseTime=true
+  dir: migrations/mysql
+  table: migrations
+```
+
+See [here](https://github.com/go-sql-driver/mysql#parsetime) for more information.
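+
+The same caveat applies when using sql-migrate as a library. A minimal sketch, assuming the `go-sql-driver/mysql` driver (the DSN is illustrative):
+
+```go
+package main
+
+import (
+	"database/sql"
+	"log"
+
+	_ "github.com/go-sql-driver/mysql"
+)
+
+func main() {
+	// parseTime=true lets applied-migration timestamps scan into time.Time.
+	db, err := sql.Open("mysql", "root@/dbname?parseTime=true")
+	if err != nil {
+		log.Fatal(err)
+	}
+	defer db.Close()
+}
+```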
+
+### Oracle (oci8)
+The Oracle driver is [oci8](https://github.com/mattn/go-oci8). It is not pure Go code and relies on the Oracle Client libraries ([Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html)); more detailed information is in the [oci8 repo](https://github.com/mattn/go-oci8).
+
+#### Install with Oracle support
+
+To install the library and command line program, use the following:
+
+```bash
+go get -tags oracle -v github.com/rubenv/sql-migrate/...
+```
+
+```yml
+development:
+  dialect: oci8
+  datasource: user/password@localhost:1521/sid
+  dir: migrations/oracle
+  table: migrations
+```
+
+### Oracle (godror)
+The Oracle driver is [godror](https://github.com/godror/godror). It is not pure Go code and relies on the Oracle Client libraries ([Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html)); more detailed information is in the [godror repository](https://github.com/godror/godror).
+
+#### Install with Oracle support
+
+To install the library and command line program, use the following:
+
+1. Install sql-migrate
+```bash
+go get -tags godror -v github.com/rubenv/sql-migrate/...
+```
+
+2. Download the Oracle Instant Client (the example below is for macOS; see [Instant Client](https://www.oracle.com/database/technologies/instant-client/downloads.html) for other systems)
+```bash
+wget https://download.oracle.com/otn_software/mac/instantclient/193000/instantclient-basic-macos.x64-19.3.0.0.0dbru.zip
+```
+
+3. Configure the `LD_LIBRARY_PATH` environment variable
+```
+export LD_LIBRARY_PATH=your_oracle_client_path/instantclient_19_3
+```
+
+```yml
+development:
+  dialect: godror
+  datasource: user/password@localhost:1521/sid
+  dir: migrations/oracle
+  table: migrations
+```
+
+
+### As a library
+
+Import sql-migrate into your application:
+
+```go
+import "github.com/rubenv/sql-migrate"
+```
+
+Set up a source of migrations; this can be from memory, from a set of files, from bindata (more on that later), or from any library that implements [`http.FileSystem`](https://godoc.org/net/http#FileSystem):
+
+```go
+// Hardcoded strings in memory:
+migrations := &migrate.MemoryMigrationSource{
+	Migrations: []*migrate.Migration{
+		&migrate.Migration{
+			Id:   "123",
+			Up:   []string{"CREATE TABLE people (id int)"},
+			Down: []string{"DROP TABLE people"},
+		},
+	},
+}
+
+// OR: Read migrations from a folder:
+migrations := &migrate.FileMigrationSource{
+	Dir: "db/migrations",
+}
+
+// OR: Use migrations from a packr box
+migrations := &migrate.PackrMigrationSource{
+	Box: packr.New("migrations", "./migrations"),
+}
+
+// OR: Use pkger which implements `http.FileSystem`
+migrationSource := &migrate.HttpFileSystemMigrationSource{
+	FileSystem: pkger.Dir("/db/migrations"),
+}
+
+// OR: Use migrations from bindata:
+migrations := &migrate.AssetMigrationSource{
+	Asset:    Asset,
+	AssetDir: AssetDir,
+	Dir:      "migrations",
+}
+
+// OR: Read migrations from a `http.FileSystem`
+migrationSource := &migrate.HttpFileSystemMigrationSource{
+	FileSystem: httpFS,
+}
+```
+
+Then use the `Exec` function to upgrade your database:
+
+```go
+db, err := sql.Open("sqlite3", filename)
+if err != nil {
+	// Handle errors!
+}
+
+n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+if err != nil {
+	// Handle errors!
+}
+fmt.Printf("Applied %d migrations!\n", n)
+```
+
+Note that `n` can be greater than `0` even if there is an error: any migration that succeeded will remain applied even if a later one fails.
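+
+A minimal sketch of handling that partial-failure behavior (the logging style is illustrative, not prescribed by the library):
+
+```go
+n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+if err != nil {
+	// n migrations may already have been applied before the failure,
+	// so report both the count and the error.
+	log.Printf("applied %d migrations before failing: %v", n, err)
+} else {
+	log.Printf("applied %d migrations", n)
+}
+```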
+ +Check [the GoDoc reference](https://godoc.org/github.com/rubenv/sql-migrate) for the full documentation. + +## Writing migrations +Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations. + +```sql +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied +CREATE TABLE people (id int); + + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back +DROP TABLE people; +``` + +You can put multiple statements in each block, as long as you end them with a semicolon (`;`). + +You can alternatively set up a separator string that matches an entire line by setting `sqlparse.LineSeparator`. This +can be used to imitate, for example, MS SQL Query Analyzer functionality where commands can be separated by a line with +contents of `GO`. If `sqlparse.LineSeparator` is matched, it will not be included in the resulting migration scripts. + +If you have complex statements which contain semicolons, use `StatementBegin` and `StatementEnd` to indicate boundaries: + +```sql +-- +migrate Up +CREATE TABLE people (id int); + +-- +migrate StatementBegin +CREATE OR REPLACE FUNCTION do_something() +returns void AS $$ +DECLARE + create_query text; +BEGIN + -- Do something here +END; +$$ +language plpgsql; +-- +migrate StatementEnd + +-- +migrate Down +DROP FUNCTION do_something(); +DROP TABLE people; +``` + +The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename. + +Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the `notransaction` option: + +```sql +-- +migrate Up notransaction +CREATE UNIQUE INDEX CONCURRENTLY people_unique_id_idx ON people (id); + +-- +migrate Down +DROP INDEX people_unique_id_idx; +``` + +## Embedding migrations with [packr](https://github.com/gobuffalo/packr) + +If you like your Go applications self-contained (that is: a single binary): use [packr](https://github.com/gobuffalo/packr) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Import the packr package into your application: + +```go +import "github.com/gobuffalo/packr/v2" +``` + +Use the `PackrMigrationSource` in your application to find the migrations: + +```go +migrations := &migrate.PackrMigrationSource{ + Box: packr.New("migrations", "./migrations"), +} +``` + +If you already have a box and would like to use a subdirectory: + +```go +migrations := &migrate.PackrMigrationSource{ + Box: myBox, + Dir: "./migrations", +} +``` + +## Embedding migrations with [bindata](https://github.com/shuLhan/go-bindata) + +As an alternative, but slightly less maintained, you can use [bindata](https://github.com/shuLhan/go-bindata) to embed the migration files. + +Just write your migration files as usual, as a set of SQL files in a folder. + +Then use bindata to generate a `.go` file with the migrations embedded: + +```bash +go-bindata -pkg myapp -o bindata.go db/migrations/ +``` + +The resulting `bindata.go` file will contain your migrations. 
+
+Use the `AssetMigrationSource` in your application to find the migrations:
+
+```go
+migrations := &migrate.AssetMigrationSource{
+	Asset:    Asset,
+	AssetDir: AssetDir,
+	Dir:      "db/migrations",
+}
+```
+
+Both `Asset` and `AssetDir` are functions provided by bindata.
+
+Then proceed as usual.
+
+## Embedding migrations with libraries that implement `http.FileSystem`
+
+You can also embed migrations with any library that implements `http.FileSystem`, like [`vfsgen`](https://github.com/shurcooL/vfsgen), [`parcello`](https://github.com/phogolabs/parcello), or [`go-resources`](https://github.com/omeid/go-resources).
+
+```go
+migrationSource := &migrate.HttpFileSystemMigrationSource{
+	FileSystem: httpFS,
+}
+```
+
+## Extending
+
+Adding a new migration source means implementing `MigrationSource`.
+
+```go
+type MigrationSource interface {
+	FindMigrations() ([]*Migration, error)
+}
+```
+
+The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the `Id` field.
+
+## Usage with [sqlx](https://jmoiron.github.io/sqlx/)
+
+This library is compatible with sqlx. When calling migrate just dereference the DB from your `*sqlx.DB`:
+
+```go
+n, err := migrate.Exec(db.DB, "sqlite3", migrations, migrate.Up)
+//                     ^^^ <-- here db is a *sqlx.DB; the db.DB field is the plain sql.DB
+if err != nil {
+	// Handle errors!
+}
+```
+
+## License
+
+This library is distributed under the [MIT](LICENSE) license.
diff --git a/vendor/github.com/rubenv/sql-migrate/doc.go b/vendor/github.com/rubenv/sql-migrate/doc.go
new file mode 100644
index 0000000000..eb4ed8575a
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/doc.go
@@ -0,0 +1,239 @@
+/*
+
+SQL Schema migration tool for Go.
+
+Key features:
+
+    * Usable as a CLI tool or as a library
+    * Supports SQLite, PostgreSQL, MySQL, MSSQL and Oracle databases (through gorp)
+    * Can embed migrations into your application
+    * Migrations are defined with SQL for full flexibility
+    * Atomic migrations
+    * Up/down migrations to allow rollback
+    * Supports multiple database types in one project
+
+Installation
+
+To install the library and command line program, use the following:
+
+    go get -v github.com/rubenv/sql-migrate/...
+
+Command-line tool
+
+The main command is called sql-migrate.
+
+    $ sql-migrate --help
+    usage: sql-migrate [--version] [--help] <command> [<args>]
+
+    Available commands are:
+        down      Undo a database migration
+        new       Create a new migration
+        redo      Reapply the last migration
+        status    Show migration status
+        up        Migrates the database to the most recent version available
+
+Each command requires a configuration file (which defaults to dbconfig.yml, but can be specified with the -config flag). This config file should specify one or more environments:
+
+    development:
+        dialect: sqlite3
+        datasource: test.db
+        dir: migrations/sqlite3
+
+    production:
+        dialect: postgres
+        datasource: dbname=myapp sslmode=disable
+        dir: migrations/postgres
+        table: migrations
+
+The `table` setting is optional and will default to `gorp_migrations`.
+
+The environment that will be used can be specified with the -env flag (defaults to development).
+
+Use the --help flag in combination with any of the commands to get an overview of its usage:
+
+    $ sql-migrate up --help
+    Usage: sql-migrate up [options] ...
+
+      Migrates the database to the most recent version available.
+
+      Options:
+
+        -config=config.yml   Configuration file to use.
+        -env="development"   Environment.
+        -limit=0             Limit the number of migrations (0 = unlimited).
+        -dryrun              Don't apply migrations, just print them.
+
+The up command applies all available migrations. By contrast, down will only apply one migration by default. This behavior can be changed for both by using the -limit parameter.
+
+The redo command will unapply the last migration and reapply it. This is useful during development, when you're writing migrations.
+
+Use the status command to see the state of the applied migrations:
+
+    $ sql-migrate status
+    +---------------+-----------------------------------------+
+    |   MIGRATION   |                 APPLIED                 |
+    +---------------+-----------------------------------------+
+    | 1_initial.sql | 2014-09-13 08:19:06.788354925 +0000 UTC |
+    | 2_record.sql  | no                                      |
+    +---------------+-----------------------------------------+
+
+MySQL Caveat
+
+If you are using MySQL, you must append ?parseTime=true to the datasource configuration. For example:
+
+    production:
+        dialect: mysql
+        datasource: root@/dbname?parseTime=true
+        dir: migrations/mysql
+        table: migrations
+
+See https://github.com/go-sql-driver/mysql#parsetime for more information.
+
+Library
+
+Import sql-migrate into your application:
+
+    import "github.com/rubenv/sql-migrate"
+
+Set up a source of migrations. This can be from memory, from a set of files or from bindata (more on that later):
+
+    // Hardcoded strings in memory:
+    migrations := &migrate.MemoryMigrationSource{
+        Migrations: []*migrate.Migration{
+            &migrate.Migration{
+                Id:   "123",
+                Up:   []string{"CREATE TABLE people (id int)"},
+                Down: []string{"DROP TABLE people"},
+            },
+        },
+    }
+
+    // OR: Read migrations from a folder:
+    migrations := &migrate.FileMigrationSource{
+        Dir: "db/migrations",
+    }
+
+    // OR: Use migrations from bindata:
+    migrations := &migrate.AssetMigrationSource{
+        Asset:    Asset,
+        AssetDir: AssetDir,
+        Dir:      "migrations",
+    }
+
+Then use the Exec function to upgrade your database:
+
+    db, err := sql.Open("sqlite3", filename)
+    if err != nil {
+        // Handle errors!
+    }
+
+    n, err := migrate.Exec(db, "sqlite3", migrations, migrate.Up)
+    if err != nil {
+        // Handle errors!
+    }
+    fmt.Printf("Applied %d migrations!\n", n)
+
+Note that n can be greater than 0 even if there is an error: any migration that succeeded will remain applied even if a later one fails.
+
+The full set of capabilities can be found in the API docs below.
+
+Writing migrations
+
+Migrations are defined in SQL files, which contain a set of SQL statements. Special comments are used to distinguish up and down migrations.
+
+    -- +migrate Up
+    -- SQL in section 'Up' is executed when this migration is applied
+    CREATE TABLE people (id int);
+
+
+    -- +migrate Down
+    -- SQL section 'Down' is executed when this migration is rolled back
+    DROP TABLE people;
+
+You can put multiple statements in each block, as long as you end them with a semicolon (;).
+
+If you have complex statements which contain semicolons, use StatementBegin and StatementEnd to indicate boundaries:
+
+    -- +migrate Up
+    CREATE TABLE people (id int);
+
+    -- +migrate StatementBegin
+    CREATE OR REPLACE FUNCTION do_something()
+    returns void AS $$
+    DECLARE
+      create_query text;
+    BEGIN
+      -- Do something here
+    END;
+    $$
+    language plpgsql;
+    -- +migrate StatementEnd
+
+    -- +migrate Down
+    DROP FUNCTION do_something();
+    DROP TABLE people;
+
+The order in which migrations are applied is defined through the filename: sql-migrate will sort migrations based on their name. It's recommended to use an increasing version number or a timestamp as the first part of the filename.
+
+Normally each migration is run within a transaction in order to guarantee that it is fully atomic. However some SQL commands (for example creating an index concurrently in PostgreSQL) cannot be executed inside a transaction. In order to execute such a command in a migration, the migration can be run using the notransaction option:
+
+    -- +migrate Up notransaction
+    CREATE UNIQUE INDEX CONCURRENTLY people_unique_id_idx ON people (id);
+
+    -- +migrate Down
+    DROP INDEX people_unique_id_idx;
+
+Embedding migrations with packr
+
+If you like your Go applications self-contained (that is: a single binary), use packr (https://github.com/gobuffalo/packr) to embed the migration files.
+
+Just write your migration files as usual, as a set of SQL files in a folder.
+
+Use the PackrMigrationSource in your application to find the migrations:
+
+    migrations := &migrate.PackrMigrationSource{
+        Box: packr.NewBox("./migrations"),
+    }
+
+If you already have a box and would like to use a subdirectory:
+
+    migrations := &migrate.PackrMigrationSource{
+        Box: myBox,
+        Dir: "./migrations",
+    }
+
+Embedding migrations with bindata
+
+As an alternative, but slightly less maintained, you can use bindata (https://github.com/shuLhan/go-bindata) to embed the migration files.
+
+Just write your migration files as usual, as a set of SQL files in a folder.
+
+Then use bindata to generate a .go file with the migrations embedded:
+
+    go-bindata -pkg myapp -o bindata.go db/migrations/
+
+The resulting bindata.go file will contain your migrations. Remember to regenerate your bindata.go file whenever you add/modify a migration (go generate can help here).
+
+Use the AssetMigrationSource in your application to find the migrations:
+
+    migrations := &migrate.AssetMigrationSource{
+        Asset:    Asset,
+        AssetDir: AssetDir,
+        Dir:      "db/migrations",
+    }
+
+Both Asset and AssetDir are functions provided by bindata.
+
+Then proceed as usual.
+
+Extending
+
+Adding a new migration source means implementing MigrationSource.
+
+    type MigrationSource interface {
+        FindMigrations() ([]*Migration, error)
+    }
+
+The resulting slice of migrations will be executed in the given order, so it should usually be sorted by the Id field.
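+
+A minimal sketch of a custom source (the hardcoded migration is purely illustrative):
+
+    // staticMigrationSource is a hypothetical source with one hardcoded migration.
+    type staticMigrationSource struct{}
+
+    func (s staticMigrationSource) FindMigrations() ([]*migrate.Migration, error) {
+        return []*migrate.Migration{
+            {
+                Id:   "1_people.sql",
+                Up:   []string{"CREATE TABLE people (id int)"},
+                Down: []string{"DROP TABLE people"},
+            },
+        }, nil
+    }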
+*/
+package migrate
diff --git a/vendor/github.com/rubenv/sql-migrate/migrate.go b/vendor/github.com/rubenv/sql-migrate/migrate.go
new file mode 100644
index 0000000000..8973ee9860
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/migrate.go
@@ -0,0 +1,768 @@
+package migrate
+
+import (
+	"bytes"
+	"database/sql"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"os"
+	"path"
+	"regexp"
+	"sort"
+	"strconv"
+	"strings"
+	"time"
+
+	"github.com/rubenv/sql-migrate/sqlparse"
+	"gopkg.in/gorp.v1"
+)
+
+type MigrationDirection int
+
+const (
+	Up MigrationDirection = iota
+	Down
+)
+
+// MigrationSet provides database parameters for a migration execution
+type MigrationSet struct {
+	// TableName is the name of the table used to store migration info.
+	TableName string
+	// SchemaName is the schema that the migration table is referenced in.
+	SchemaName string
+	// IgnoreUnknown skips the check to see if there is a migration
+	// run in the database that is not in MigrationSource.
+	//
+	// This should be used sparingly as it is removing a safety check.
+	IgnoreUnknown bool
+}
+
+var migSet = MigrationSet{}
+
+// getTableName returns the name of the table used to store migration info,
+// defaulting to "gorp_migrations".
+func (ms MigrationSet) getTableName() string {
+	if ms.TableName == "" {
+		return "gorp_migrations"
+	}
+	return ms.TableName
+}
+
+var numberPrefixRegex = regexp.MustCompile(`^(\d+).*$`)
+
+// PlanError happens when no migration plan could be created between the sets
+// of already applied migrations and the currently found. For example, when the database
+// contains a migration which is not among the migrations list found for an operation.
+type PlanError struct {
+	Migration    *Migration
+	ErrorMessage string
+}
+
+func newPlanError(migration *Migration, errorMessage string) error {
+	return &PlanError{
+		Migration:    migration,
+		ErrorMessage: errorMessage,
+	}
+}
+
+func (p *PlanError) Error() string {
+	return fmt.Sprintf("Unable to create migration plan because of %s: %s",
+		p.Migration.Id, p.ErrorMessage)
+}
+
+// TxError is returned when any error is encountered during a database
+// transaction. It contains the relevant *Migration and notes its Id in the
+// Error function output.
+type TxError struct {
+	Migration *Migration
+	Err       error
+}
+
+func newTxError(migration *PlannedMigration, err error) error {
+	return &TxError{
+		Migration: migration.Migration,
+		Err:       err,
+	}
+}
+
+func (e *TxError) Error() string {
+	return e.Err.Error() + " handling " + e.Migration.Id
+}
+
+// SetTable sets the name of the table used to store migration info.
+//
+// Should be called before any other call such as (Exec, ExecMax, ...).
+func SetTable(name string) {
+	if name != "" {
+		migSet.TableName = name
+	}
+}
+
+// SetSchema sets the name of the schema that the migration table is referenced in.
+func SetSchema(name string) {
+	if name != "" {
+		migSet.SchemaName = name
+	}
+}
+
+// SetIgnoreUnknown sets the flag that skips the database check to see if there is a
+// migration in the database that is not in the migration source.
+//
+// This should be used sparingly as it is removing a safety check.
+func SetIgnoreUnknown(v bool) {
+	migSet.IgnoreUnknown = v
+}
+
+type Migration struct {
+	Id   string
+	Up   []string
+	Down []string
+
+	DisableTransactionUp   bool
+	DisableTransactionDown bool
+}
+
+func (m Migration) Less(other *Migration) bool {
+	switch {
+	case m.isNumeric() && other.isNumeric() && m.VersionInt() != other.VersionInt():
+		return m.VersionInt() < other.VersionInt()
+	case m.isNumeric() && !other.isNumeric():
+		return true
+	case !m.isNumeric() && other.isNumeric():
+		return false
+	default:
+		return m.Id < other.Id
+	}
+}
+
+func (m Migration) isNumeric() bool {
+	return len(m.NumberPrefixMatches()) > 0
+}
+
+func (m Migration) NumberPrefixMatches() []string {
+	return numberPrefixRegex.FindStringSubmatch(m.Id)
+}
+
+func (m Migration) VersionInt() int64 {
+	v := m.NumberPrefixMatches()[1]
+	value, err := strconv.ParseInt(v, 10, 64)
+	if err != nil {
+		panic(fmt.Sprintf("Could not parse %q into int64: %s", v, err))
+	}
+	return value
+}
+
+type PlannedMigration struct {
+	*Migration
+
+	DisableTransaction bool
+	Queries            []string
+}
+
+type byId []*Migration
+
+func (b byId) Len() int           { return len(b) }
+func (b byId) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
+func (b byId) Less(i, j int) bool { return b[i].Less(b[j]) }
+
+type MigrationRecord struct {
+	Id        string    `db:"id"`
+	AppliedAt time.Time `db:"applied_at"`
+}
+
+type OracleDialect struct {
+	gorp.OracleDialect
+}
+
+func (d OracleDialect) IfTableNotExists(command, schema, table string) string {
+	return command
+}
+
+func (d OracleDialect) IfSchemaNotExists(command, schema string) string {
+	return command
+}
+
+func (d OracleDialect) IfTableExists(command, schema, table string) string {
+	return command
+}
+
+var MigrationDialects = map[string]gorp.Dialect{
+	"sqlite3":  gorp.SqliteDialect{},
+	"postgres": gorp.PostgresDialect{},
+	"mysql":    gorp.MySQLDialect{Engine: "InnoDB", Encoding: "UTF8"},
+	"mssql":    gorp.SqlServerDialect{},
+	"oci8":     OracleDialect{},
+	"godror":   OracleDialect{},
+}
+
+type MigrationSource interface {
+	// Finds the migrations.
+	//
+	// The resulting slice of migrations should be sorted by Id.
+	FindMigrations() ([]*Migration, error)
+}
+
+// A hardcoded set of migrations, in-memory.
+type MemoryMigrationSource struct {
+	Migrations []*Migration
+}
+
+var _ MigrationSource = (*MemoryMigrationSource)(nil)
+
+func (m MemoryMigrationSource) FindMigrations() ([]*Migration, error) {
+	// Make sure migrations are sorted. To keep MemoryMigrationSource safe for
+	// concurrent use, it must not be mutated in place, so FindMigrations sorts
+	// a copy of m.Migrations.
+	migrations := make([]*Migration, len(m.Migrations))
+	copy(migrations, m.Migrations)
+	sort.Sort(byId(migrations))
+	return migrations, nil
+}
+
+// A set of migrations loaded from an http.FileSystem.
+
+type HttpFileSystemMigrationSource struct {
+	FileSystem http.FileSystem
+}
+
+var _ MigrationSource = (*HttpFileSystemMigrationSource)(nil)
+
+func (f HttpFileSystemMigrationSource) FindMigrations() ([]*Migration, error) {
+	return findMigrations(f.FileSystem, "/")
+}
+
+// A set of migrations loaded from a directory.
+type FileMigrationSource struct {
+	Dir string
+}
+
+var _ MigrationSource = (*FileMigrationSource)(nil)
+
+func (f FileMigrationSource) FindMigrations() ([]*Migration, error) {
+	filesystem := http.Dir(f.Dir)
+	return findMigrations(filesystem, "/")
+}
+
+func findMigrations(dir http.FileSystem, root string) ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+
+	file, err := dir.Open(root)
+	if err != nil {
+		return nil, err
+	}
+
+	files, err := file.Readdir(0)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, info := range files {
+		if strings.HasSuffix(info.Name(), ".sql") {
+			migration, err := migrationFromFile(dir, root, info)
+			if err != nil {
+				return nil, err
+			}
+
+			migrations = append(migrations, migration)
+		}
+	}
+
+	// Make sure migrations are sorted
+	sort.Sort(byId(migrations))
+
+	return migrations, nil
+}
+
+func migrationFromFile(dir http.FileSystem, root string, info os.FileInfo) (*Migration, error) {
+	path := path.Join(root, info.Name())
+	file, err := dir.Open(path)
+	if err != nil {
+		return nil, fmt.Errorf("Error while opening %s: %s", info.Name(), err)
+	}
+	defer func() { _ = file.Close() }()
+
+	migration, err := ParseMigration(info.Name(), file)
+	if err != nil {
+		return nil, fmt.Errorf("Error while parsing %s: %s", info.Name(), err)
+	}
+	return migration, nil
+}
+
+// Migrations from a bindata asset set.
+type AssetMigrationSource struct {
+	// Asset should return content of file in path if exists
+	Asset func(path string) ([]byte, error)
+
+	// AssetDir should return list of files in the path
+	AssetDir func(path string) ([]string, error)
+
+	// Path in the bindata to use.
+	Dir string
+}
+
+var _ MigrationSource = (*AssetMigrationSource)(nil)
+
+func (a AssetMigrationSource) FindMigrations() ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+
+	files, err := a.AssetDir(a.Dir)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, name := range files {
+		if strings.HasSuffix(name, ".sql") {
+			file, err := a.Asset(path.Join(a.Dir, name))
+			if err != nil {
+				return nil, err
+			}
+
+			migration, err := ParseMigration(name, bytes.NewReader(file))
+			if err != nil {
+				return nil, err
+			}
+
+			migrations = append(migrations, migration)
+		}
+	}
+
+	// Make sure migrations are sorted
+	sort.Sort(byId(migrations))
+
+	return migrations, nil
+}
+
+// Avoids pulling in the packr library for everyone; mimics the bits of
+// packr.Box that we need.
+type PackrBox interface {
+	List() []string
+	Find(name string) ([]byte, error)
+}
+
+// Migrations from a packr box.
+type PackrMigrationSource struct {
+	Box PackrBox
+
+	// Path in the box to use.
+	Dir string
+}
+
+var _ MigrationSource = (*PackrMigrationSource)(nil)
+
+func (p PackrMigrationSource) FindMigrations() ([]*Migration, error) {
+	migrations := make([]*Migration, 0)
+	items := p.Box.List()
+
+	prefix := ""
+	dir := path.Clean(p.Dir)
+	if dir != "."
{ + prefix = fmt.Sprintf("%s/", dir) + } + + for _, item := range items { + if !strings.HasPrefix(item, prefix) { + continue + } + name := strings.TrimPrefix(item, prefix) + if strings.Contains(name, "/") { + continue + } + + if strings.HasSuffix(name, ".sql") { + file, err := p.Box.Find(item) + if err != nil { + return nil, err + } + + migration, err := ParseMigration(name, bytes.NewReader(file)) + if err != nil { + return nil, err + } + + migrations = append(migrations, migration) + } + } + + // Make sure migrations are sorted + sort.Sort(byId(migrations)) + + return migrations, nil +} + +// Migration parsing +func ParseMigration(id string, r io.ReadSeeker) (*Migration, error) { + m := &Migration{ + Id: id, + } + + parsed, err := sqlparse.ParseMigration(r) + if err != nil { + return nil, fmt.Errorf("Error parsing migration (%s): %s", id, err) + } + + m.Up = parsed.UpStatements + m.Down = parsed.DownStatements + + m.DisableTransactionUp = parsed.DisableTransactionUp + m.DisableTransactionDown = parsed.DisableTransactionDown + + return m, nil +} + +type SqlExecutor interface { + Exec(query string, args ...interface{}) (sql.Result, error) + Insert(list ...interface{}) error + Delete(list ...interface{}) (int64, error) +} + +// Execute a set of migrations +// +// Returns the number of applied migrations. +func Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ExecMax(db, dialect, m, dir, 0) +} + +// Returns the number of applied migrations. +func (ms MigrationSet) Exec(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection) (int, error) { + return ms.ExecMax(db, dialect, m, dir, 0) +} + +// Execute a set of migrations +// +// Will apply at most `max` migrations. Pass 0 for no limit (or use Exec). +// +// Returns the number of applied migrations. +func ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + return migSet.ExecMax(db, dialect, m, dir, max) +} + +// Returns the number of applied migrations. 
+func (ms MigrationSet) ExecMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + migrations, dbMap, err := ms.PlanMigration(db, dialect, m, dir, max) + if err != nil { + return 0, err + } + + // Apply migrations + applied := 0 + for _, migration := range migrations { + var executor SqlExecutor + + if migration.DisableTransaction { + executor = dbMap + } else { + executor, err = dbMap.Begin() + if err != nil { + return applied, newTxError(migration, err) + } + } + + for _, stmt := range migration.Queries { + // remove the semicolon from stmt, fix ORA-00922 issue in database oracle + stmt = strings.TrimSuffix(stmt, "\n") + stmt = strings.TrimSuffix(stmt, " ") + stmt = strings.TrimSuffix(stmt, ";") + if _, err := executor.Exec(stmt); err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + } + + switch dir { + case Up: + err = executor.Insert(&MigrationRecord{ + Id: migration.Id, + AppliedAt: time.Now(), + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + case Down: + _, err := executor.Delete(&MigrationRecord{ + Id: migration.Id, + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + default: + panic("Not possible") + } + + if trans, ok := executor.(*gorp.Transaction); ok { + if err := trans.Commit(); err != nil { + return applied, newTxError(migration, err) + } + } + + applied++ + } + + return applied, nil +} + +// Plan a migration. +func PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { + return migSet.PlanMigration(db, dialect, m, dir, max) +} + +func (ms MigrationSet) PlanMigration(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) ([]*PlannedMigration, *gorp.DbMap, error) { + dbMap, err := ms.getMigrationDbMap(db, dialect) + if err != nil { + return nil, nil, err + } + + migrations, err := m.FindMigrations() + if err != nil { + return nil, nil, err + } + + var migrationRecords []MigrationRecord + _, err = dbMap.Select(&migrationRecords, fmt.Sprintf("SELECT * FROM %s", dbMap.Dialect.QuotedTableForQuery(ms.SchemaName, ms.getTableName()))) + if err != nil { + return nil, nil, err + } + + // Sort migrations that have been run by Id. + var existingMigrations []*Migration + for _, migrationRecord := range migrationRecords { + existingMigrations = append(existingMigrations, &Migration{ + Id: migrationRecord.Id, + }) + } + sort.Sort(byId(existingMigrations)) + + // Make sure all migrations in the database are among the found migrations which + // are to be applied. + if !ms.IgnoreUnknown { + migrationsSearch := make(map[string]struct{}) + for _, migration := range migrations { + migrationsSearch[migration.Id] = struct{}{} + } + for _, existingMigration := range existingMigrations { + if _, ok := migrationsSearch[existingMigration.Id]; !ok { + return nil, nil, newPlanError(existingMigration, "unknown migration in database") + } + } + } + + // Get last migration that was run + record := &Migration{} + if len(existingMigrations) > 0 { + record = existingMigrations[len(existingMigrations)-1] + } + + result := make([]*PlannedMigration, 0) + + // Add missing migrations up to the last run migration. 
+ // This can happen for example when merges happened. + if len(existingMigrations) > 0 { + result = append(result, ToCatchup(migrations, existingMigrations, record)...) + } + + // Figure out which migrations to apply + toApply := ToApply(migrations, record.Id, dir) + toApplyCount := len(toApply) + if max > 0 && max < toApplyCount { + toApplyCount = max + } + for _, v := range toApply[0:toApplyCount] { + + if dir == Up { + result = append(result, &PlannedMigration{ + Migration: v, + Queries: v.Up, + DisableTransaction: v.DisableTransactionUp, + }) + } else if dir == Down { + result = append(result, &PlannedMigration{ + Migration: v, + Queries: v.Down, + DisableTransaction: v.DisableTransactionDown, + }) + } + } + + return result, dbMap, nil +} + +// Skip a set of migrations +// +// Will skip at most `max` migrations. Pass 0 for no limit. +// +// Returns the number of skipped migrations. +func SkipMax(db *sql.DB, dialect string, m MigrationSource, dir MigrationDirection, max int) (int, error) { + migrations, dbMap, err := PlanMigration(db, dialect, m, dir, max) + if err != nil { + return 0, err + } + + // Skip migrations + applied := 0 + for _, migration := range migrations { + var executor SqlExecutor + + if migration.DisableTransaction { + executor = dbMap + } else { + executor, err = dbMap.Begin() + if err != nil { + return applied, newTxError(migration, err) + } + } + + err = executor.Insert(&MigrationRecord{ + Id: migration.Id, + AppliedAt: time.Now(), + }) + if err != nil { + if trans, ok := executor.(*gorp.Transaction); ok { + _ = trans.Rollback() + } + + return applied, newTxError(migration, err) + } + + if trans, ok := executor.(*gorp.Transaction); ok { + if err := trans.Commit(); err != nil { + return applied, newTxError(migration, err) + } + } + + applied++ + } + + return applied, nil +} + +// Filter a slice of migrations into ones that should be applied. 
+func ToApply(migrations []*Migration, current string, direction MigrationDirection) []*Migration { + var index = -1 + if current != "" { + for index < len(migrations)-1 { + index++ + if migrations[index].Id == current { + break + } + } + } + + if direction == Up { + return migrations[index+1:] + } else if direction == Down { + if index == -1 { + return []*Migration{} + } + + // Add in reverse order + toApply := make([]*Migration, index+1) + for i := 0; i < index+1; i++ { + toApply[index-i] = migrations[i] + } + return toApply + } + + panic("Not possible") +} + +func ToCatchup(migrations, existingMigrations []*Migration, lastRun *Migration) []*PlannedMigration { + missing := make([]*PlannedMigration, 0) + for _, migration := range migrations { + found := false + for _, existing := range existingMigrations { + if existing.Id == migration.Id { + found = true + break + } + } + if !found && migration.Less(lastRun) { + missing = append(missing, &PlannedMigration{ + Migration: migration, + Queries: migration.Up, + DisableTransaction: migration.DisableTransactionUp, + }) + } + } + return missing +} + +func GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { + return migSet.GetMigrationRecords(db, dialect) +} + +func (ms MigrationSet) GetMigrationRecords(db *sql.DB, dialect string) ([]*MigrationRecord, error) { + dbMap, err := ms.getMigrationDbMap(db, dialect) + if err != nil { + return nil, err + } + + var records []*MigrationRecord + query := fmt.Sprintf("SELECT * FROM %s ORDER BY %s ASC", dbMap.Dialect.QuotedTableForQuery(ms.SchemaName, ms.getTableName()), dbMap.Dialect.QuoteField("id")) + _, err = dbMap.Select(&records, query) + if err != nil { + return nil, err + } + + return records, nil +} + +func (ms MigrationSet) getMigrationDbMap(db *sql.DB, dialect string) (*gorp.DbMap, error) { + d, ok := MigrationDialects[dialect] + if !ok { + return nil, fmt.Errorf("Unknown dialect: %s", dialect) + } + + // When using the mysql driver, make sure that the parseTime option is + // configured, otherwise it won't map time columns to time.Time. See + // https://github.com/rubenv/sql-migrate/issues/2 + if dialect == "mysql" { + var out *time.Time + err := db.QueryRow("SELECT NOW()").Scan(&out) + if err != nil { + if err.Error() == "sql: Scan error on column index 0: unsupported driver -> Scan pair: []uint8 -> *time.Time" || + err.Error() == "sql: Scan error on column index 0: unsupported Scan, storing driver.Value type []uint8 into type *time.Time" || + err.Error() == "sql: Scan error on column index 0, name \"NOW()\": unsupported Scan, storing driver.Value type []uint8 into type *time.Time" { + return nil, errors.New(`Cannot parse dates. + +Make sure that the parseTime option is supplied to your database connection. +Check https://github.com/go-sql-driver/mysql#parsetime for more info.`) + } else { + return nil, err + } + } + } + + // Create migration database map + dbMap := &gorp.DbMap{Db: db, Dialect: d} + table := dbMap.AddTableWithNameAndSchema(MigrationRecord{}, ms.SchemaName, ms.getTableName()).SetKeys(false, "Id") + //dbMap.TraceOn("", log.New(os.Stdout, "migrate: ", log.Lmicroseconds)) + + if dialect == "oci8" || dialect == "godror" { + table.ColMap("Id").SetMaxSize(4000) + } + + err := dbMap.CreateTablesIfNotExists() + if err != nil { + // Oracle database does not support `if not exists`, so use `ORA-00955:` error code + // to check if the table exists. 
+		if (dialect == "oci8" || dialect == "godror") && strings.Contains(err.Error(), "ORA-00955:") {
+			return dbMap, nil
+		}
+		return nil, err
+	}
+
+	return dbMap, nil
+}
+
+// TODO: Run migration + record insert in transaction.
diff --git a/vendor/github.com/rubenv/sql-migrate/migrate_go116.go b/vendor/github.com/rubenv/sql-migrate/migrate_go116.go
new file mode 100644
index 0000000000..e646098cf4
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/migrate_go116.go
@@ -0,0 +1,22 @@
+// +build go1.16
+
+package migrate
+
+import (
+	"embed"
+	"net/http"
+)
+
+// A set of migrations loaded from a go1.16 embed.FS.
+
+type EmbedFileSystemMigrationSource struct {
+	FileSystem embed.FS
+
+	Root string
+}
+
+var _ MigrationSource = (*EmbedFileSystemMigrationSource)(nil)
+
+func (f EmbedFileSystemMigrationSource) FindMigrations() ([]*Migration, error) {
+	return findMigrations(http.FS(f.FileSystem), f.Root)
+}
diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE b/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE
new file mode 100644
index 0000000000..9c12525138
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/LICENSE
@@ -0,0 +1,22 @@
+MIT License
+
+Copyright (C) 2014-2017 by Ruben Vermeersch
+Copyright (C) 2012-2014 by Liam Staskawicz
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md
new file mode 100644
index 0000000000..fa5341abdb
--- /dev/null
+++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/README.md
@@ -0,0 +1,7 @@
+# SQL migration parser
+
+Based on the [goose](https://bitbucket.org/liamstask/goose) migration parser.
+
+## License
+
+This library is distributed under the [MIT](LICENSE) license.
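+
+A minimal sketch of driving the parser directly (the migration text is the example from the sql-migrate docs):
+
+```go
+package main
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/rubenv/sql-migrate/sqlparse"
+)
+
+func main() {
+	src := strings.NewReader(`-- +migrate Up
+CREATE TABLE people (id int);
+
+-- +migrate Down
+DROP TABLE people;
+`)
+
+	// ParseMigration splits the script into up and down statement lists.
+	parsed, err := sqlparse.ParseMigration(src)
+	if err != nil {
+		panic(err)
+	}
+	fmt.Printf("up: %d statement(s), down: %d statement(s)\n",
+		len(parsed.UpStatements), len(parsed.DownStatements))
+}
+```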
diff --git a/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go new file mode 100644 index 0000000000..d336e772ad --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/sqlparse/sqlparse.go @@ -0,0 +1,235 @@ +package sqlparse + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + + "strings" +) + +const ( + sqlCmdPrefix = "-- +migrate " + optionNoTransaction = "notransaction" +) + +type ParsedMigration struct { + UpStatements []string + DownStatements []string + + DisableTransactionUp bool + DisableTransactionDown bool +} + +var ( + // LineSeparator can be used to split migrations by an exact line match. This line + // will be removed from the output. If left blank, it is not considered. It is defaulted + // to blank so you will have to set it manually. + // Use case: in MSSQL, it is convenient to separate commands by GO statements like in + // SQL Query Analyzer. + LineSeparator = "" +) + +func errNoTerminator() error { + if len(LineSeparator) == 0 { + return errors.New(`ERROR: The last statement must be ended by a semicolon or '-- +migrate StatementEnd' marker. + See https://github.com/rubenv/sql-migrate for details.`) + } + + return errors.New(fmt.Sprintf(`ERROR: The last statement must be ended by a semicolon, a line whose contents are %q, or '-- +migrate StatementEnd' marker. + See https://github.com/rubenv/sql-migrate for details.`, LineSeparator)) +} + +// Checks the line to see if the line has a statement-ending semicolon +// or if the line contains a double-dash comment. +func endsWithSemicolon(line string) bool { + + prev := "" + scanner := bufio.NewScanner(strings.NewReader(line)) + scanner.Split(bufio.ScanWords) + + for scanner.Scan() { + word := scanner.Text() + if strings.HasPrefix(word, "--") { + break + } + prev = word + } + + return strings.HasSuffix(prev, ";") +} + +type migrationDirection int + +const ( + directionNone migrationDirection = iota + directionUp + directionDown +) + +type migrateCommand struct { + Command string + Options []string +} + +func (c *migrateCommand) HasOption(opt string) bool { + for _, specifiedOption := range c.Options { + if specifiedOption == opt { + return true + } + } + + return false +} + +func parseCommand(line string) (*migrateCommand, error) { + cmd := &migrateCommand{} + + if !strings.HasPrefix(line, sqlCmdPrefix) { + return nil, errors.New("ERROR: not a sql-migrate command") + } + + fields := strings.Fields(line[len(sqlCmdPrefix):]) + if len(fields) == 0 { + return nil, errors.New(`ERROR: incomplete migration command`) + } + + cmd.Command = fields[0] + + cmd.Options = fields[1:] + + return cmd, nil +} + +// Split the given sql script into individual statements. +// +// The base case is to simply split on semicolons, as these +// naturally terminate a statement. +// +// However, more complex cases like pl/pgsql can have semicolons +// within a statement. For these cases, we provide the explicit annotations +// 'StatementBegin' and 'StatementEnd' to allow the script to +// tell us to ignore semicolons. 
+func ParseMigration(r io.ReadSeeker) (*ParsedMigration, error) { + p := &ParsedMigration{} + + _, err := r.Seek(0, 0) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + scanner := bufio.NewScanner(r) + scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024) + + statementEnded := false + ignoreSemicolons := false + currentDirection := directionNone + + for scanner.Scan() { + line := scanner.Text() + // ignore comment except beginning with '-- +' + if strings.HasPrefix(line, "-- ") && !strings.HasPrefix(line, "-- +") { + continue + } + + // handle any migrate-specific commands + if strings.HasPrefix(line, sqlCmdPrefix) { + cmd, err := parseCommand(line) + if err != nil { + return nil, err + } + + switch cmd.Command { + case "Up": + if len(strings.TrimSpace(buf.String())) > 0 { + return nil, errNoTerminator() + } + currentDirection = directionUp + if cmd.HasOption(optionNoTransaction) { + p.DisableTransactionUp = true + } + break + + case "Down": + if len(strings.TrimSpace(buf.String())) > 0 { + return nil, errNoTerminator() + } + currentDirection = directionDown + if cmd.HasOption(optionNoTransaction) { + p.DisableTransactionDown = true + } + break + + case "StatementBegin": + if currentDirection != directionNone { + ignoreSemicolons = true + } + break + + case "StatementEnd": + if currentDirection != directionNone { + statementEnded = (ignoreSemicolons == true) + ignoreSemicolons = false + } + break + } + } + + if currentDirection == directionNone { + continue + } + + isLineSeparator := !ignoreSemicolons && len(LineSeparator) > 0 && line == LineSeparator + + if !isLineSeparator && !strings.HasPrefix(line, "-- +") { + if _, err := buf.WriteString(line + "\n"); err != nil { + return nil, err + } + } + + // Wrap up the two supported cases: 1) basic with semicolon; 2) psql statement + // Lines that end with semicolon that are in a statement block + // do not conclude statement. + if (!ignoreSemicolons && (endsWithSemicolon(line) || isLineSeparator)) || statementEnded { + statementEnded = false + switch currentDirection { + case directionUp: + p.UpStatements = append(p.UpStatements, buf.String()) + + case directionDown: + p.DownStatements = append(p.DownStatements, buf.String()) + + default: + panic("impossible state") + } + + buf.Reset() + } + } + + if err := scanner.Err(); err != nil { + return nil, err + } + + // diagnose likely migration script errors + if ignoreSemicolons { + return nil, errors.New("ERROR: saw '-- +migrate StatementBegin' with no matching '-- +migrate StatementEnd'") + } + + if currentDirection == directionNone { + return nil, errors.New(`ERROR: no Up/Down annotations found, so no statements were executed. + See https://github.com/rubenv/sql-migrate for details.`) + } + + // allow comment without sql instruction. Example: + // -- +migrate Down + // -- nothing to downgrade! 
+ if len(strings.TrimSpace(buf.String())) > 0 && !strings.HasPrefix(buf.String(), "-- +") { + return nil, errNoTerminator() + } + + return p, nil +} diff --git a/vendor/github.com/rubenv/sql-migrate/test-migrations/1_initial.sql b/vendor/github.com/rubenv/sql-migrate/test-migrations/1_initial.sql new file mode 100644 index 0000000000..cd896fbd7b --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/test-migrations/1_initial.sql @@ -0,0 +1,8 @@ +-- +migrate Up +-- SQL in section 'Up' is executed when this migration is applied +CREATE TABLE people (id int); + + +-- +migrate Down +-- SQL section 'Down' is executed when this migration is rolled back +DROP TABLE people; diff --git a/vendor/github.com/rubenv/sql-migrate/test-migrations/2_record.sql b/vendor/github.com/rubenv/sql-migrate/test-migrations/2_record.sql new file mode 100644 index 0000000000..c76d7691bf --- /dev/null +++ b/vendor/github.com/rubenv/sql-migrate/test-migrations/2_record.sql @@ -0,0 +1,5 @@ +-- +migrate Up +INSERT INTO people (id) VALUES (1); + +-- +migrate Down +DELETE FROM people WHERE id=1; diff --git a/vendor/github.com/russross/blackfriday/.gitignore b/vendor/github.com/russross/blackfriday/.gitignore new file mode 100644 index 0000000000..75623dcccb --- /dev/null +++ b/vendor/github.com/russross/blackfriday/.gitignore @@ -0,0 +1,8 @@ +*.out +*.swp +*.8 +*.6 +_obj +_test* +markdown +tags diff --git a/vendor/github.com/russross/blackfriday/.travis.yml b/vendor/github.com/russross/blackfriday/.travis.yml new file mode 100644 index 0000000000..2f3351d7ae --- /dev/null +++ b/vendor/github.com/russross/blackfriday/.travis.yml @@ -0,0 +1,17 @@ +sudo: false +language: go +go: + - "1.9.x" + - "1.10.x" + - tip +matrix: + fast_finish: true + allow_failures: + - go: tip +install: + - # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step). +script: + - go get -t -v ./... + - diff -u <(echo -n) <(gofmt -d -s .) + - go tool vet . + - go test -v -race ./... diff --git a/vendor/github.com/russross/blackfriday/LICENSE.txt b/vendor/github.com/russross/blackfriday/LICENSE.txt new file mode 100644 index 0000000000..2885af3602 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/LICENSE.txt @@ -0,0 +1,29 @@ +Blackfriday is distributed under the Simplified BSD License: + +> Copyright © 2011 Russ Ross +> All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions +> are met: +> +> 1. Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following +> disclaimer in the documentation and/or other materials provided with +> the distribution. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +> FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE
+> COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+> INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+> BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+> LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+> LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+> ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+> POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/russross/blackfriday/README.md b/vendor/github.com/russross/blackfriday/README.md
new file mode 100644
index 0000000000..3c62e13753
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/README.md
@@ -0,0 +1,369 @@
+Blackfriday
+[![Build Status][BuildSVG]][BuildURL]
+[![Godoc][GodocV2SVG]][GodocV2URL]
+===========
+
+Blackfriday is a [Markdown][1] processor implemented in [Go][2]. It
+is paranoid about its input (so you can safely feed it user-supplied
+data), it is fast, it supports common extensions (tables, smart
+punctuation substitutions, etc.), and it is safe for all utf-8
+(unicode) input.
+
+HTML output is currently supported, along with Smartypants
+extensions.
+
+It started as a translation from C of [Sundown][3].
+
+
+Installation
+------------
+
+Blackfriday is compatible with any modern Go release. With Go and git installed:
+
+    go get -u gopkg.in/russross/blackfriday.v2
+
+will download, compile, and install the package into your `$GOPATH` directory
+hierarchy.
+
+
+Versions
+--------
+
+The currently maintained and recommended version of Blackfriday is `v2`. It's being
+developed on its own branch: https://github.com/russross/blackfriday/tree/v2 and the
+documentation is available at
+https://godoc.org/gopkg.in/russross/blackfriday.v2.
+
+It is `go get`-able via [gopkg.in][6] at `gopkg.in/russross/blackfriday.v2`,
+but we highly recommend using a package management tool like [dep][7] or
+[Glide][8] and making use of semantic versioning. With package management you
+should import `github.com/russross/blackfriday` and specify that you're using
+version 2.0.0.
+
+Version 2 offers a number of improvements over v1:
+
+* Cleaned up API
+* A separate call to [`Parse`][4], which produces an abstract syntax tree for
+  the document
+* Latest bug fixes
+* Flexibility to easily add your own rendering extensions
+
+Potential drawbacks:
+
+* Our benchmarks show v2 to be slightly slower than v1. Currently in the
+  ballpark of around 15%.
+* API breakage. If you can't afford to modify your code to adhere to the new API
+  and don't care too much about the new features, v2 is probably not for you.
+* Several bug fixes are trailing behind and still need to be forward-ported to
+  v2. See issue [#348](https://github.com/russross/blackfriday/issues/348) for
+  tracking.
+
+If you are still interested in the legacy `v1`, you can import it from
+`github.com/russross/blackfriday`. Documentation for the legacy v1 can be found
+here: https://godoc.org/github.com/russross/blackfriday
+
+### Known issue with `dep`
+
+There is a known problem with using Blackfriday v1 _transitively_ and `dep`.
+Currently `dep` prioritizes semver versions over anything else, and picks the
+latest one, plus it does not apply a `[[constraint]]` specifier to transitively
+pulled in packages.
So if you're using something that uses Blackfriday v1, but
+that something does not use `dep` yet, you will get Blackfriday v2 pulled in and
+your first dependency will fail to build.
+
+There are a couple of fixes for it, documented here:
+https://github.com/golang/dep/blob/master/docs/FAQ.md#how-do-i-constrain-a-transitive-dependencys-version
+
+Meanwhile, the `dep` team is working on a more general solution to the constraints
+on transitive dependencies problem: https://github.com/golang/dep/issues/1124.
+
+
+Usage
+-----
+
+### v1
+
+For basic usage, it is as simple as getting your input into a byte
+slice and calling:
+
+    output := blackfriday.MarkdownBasic(input)
+
+This renders it with no extensions enabled. To get a more useful
+feature set, use this instead:
+
+    output := blackfriday.MarkdownCommon(input)
+
+### v2
+
+For the most sensible markdown processing, it is as simple as getting your input
+into a byte slice and calling:
+
+```go
+output := blackfriday.Run(input)
+```
+
+Your input will be parsed and the output rendered with a set of the most popular
+extensions enabled. If you want the most basic feature set, corresponding with
+the bare Markdown specification, use:
+
+```go
+output := blackfriday.Run(input, blackfriday.WithNoExtensions())
+```
+
+### Sanitize untrusted content
+
+Blackfriday itself does nothing to protect against malicious content. If you are
+dealing with user-supplied markdown, we recommend running Blackfriday's output
+through an HTML sanitizer such as [Bluemonday][5].
+
+Here's an example of simple usage of Blackfriday together with Bluemonday:
+
+```go
+import (
+    "github.com/microcosm-cc/bluemonday"
+    "gopkg.in/russross/blackfriday.v2"
+)
+
+// ...
+unsafe := blackfriday.Run(input)
+html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
+```
+
+### Custom options, v1
+
+If you want to customize the set of options, first get a renderer
+(currently only the HTML output engine), then use it to
+call the more general `Markdown` function. For examples, see the
+implementations of `MarkdownBasic` and `MarkdownCommon` in
+`markdown.go`.
+
+### Custom options, v2
+
+If you want to customize the set of options, use `blackfriday.WithExtensions`,
+`blackfriday.WithRenderer` and `blackfriday.WithRefOverride`.
+
+### `blackfriday-tool`
+
+You can also check out `blackfriday-tool` for a more complete example
+of how to use it. Download and install it using:
+
+    go get github.com/russross/blackfriday-tool
+
+This is a simple command-line tool that allows you to process a
+markdown file using a standalone program. You can also browse the
+source directly on github if you are just looking for some example
+code:
+
+* <https://github.com/russross/blackfriday-tool>
+
+Note that if you have not already done so, installing
+`blackfriday-tool` will be sufficient to download and install
+blackfriday in addition to the tool itself. The tool binary will be
+installed in `$GOPATH/bin`. This is a statically-linked binary that
+can be copied to wherever you need it without worrying about
+dependencies and library versions.
+
+### Sanitized anchor names
+
+Blackfriday includes an algorithm for creating sanitized anchor names
+corresponding to a given input text. This algorithm is used to create
+anchors for headings when `EXTENSION_AUTO_HEADER_IDS` is enabled. The
+algorithm has a specification, so that other packages can create
+compatible anchor names and links to those anchors.
+
+The specification is located at https://godoc.org/github.com/russross/blackfriday#hdr-Sanitized_Anchor_Names.
+
+[`SanitizedAnchorName`](https://godoc.org/github.com/russross/blackfriday#SanitizedAnchorName) exposes this functionality, and can be used to
+create compatible links to the anchor names generated by blackfriday.
+This algorithm is also implemented in a small standalone package at
+[`github.com/shurcooL/sanitized_anchor_name`](https://godoc.org/github.com/shurcooL/sanitized_anchor_name). It can be useful for clients
+that want a small package and don't need the full functionality of blackfriday.
+
+
+Features
+--------
+
+All features of Sundown are supported, including:
+
+* **Compatibility**. The Markdown v1.0.3 test suite passes with
+  the `--tidy` option. Without `--tidy`, the differences are
+  mostly in whitespace and entity escaping, where blackfriday is
+  more consistent and cleaner.
+
+* **Common extensions**, including table support, fenced code
+  blocks, autolinks, strikethroughs, non-strict emphasis, etc.
+
+* **Safety**. Blackfriday is paranoid when parsing, making it safe
+  to feed untrusted user input without fear of bad things
+  happening. The test suite stress tests this and there are no
+  known inputs that make it crash. If you find one, please let me
+  know and send me the input that does it.
+
+  NOTE: "safety" in this context means *runtime safety only*. In order to
+  protect yourself against JavaScript injection in untrusted content, see
+  [this example](https://github.com/russross/blackfriday#sanitize-untrusted-content).
+
+* **Fast processing**. It is fast enough to render on-demand in
+  most web applications without having to cache the output.
+
+* **Thread safety**. You can run multiple parsers in different
+  goroutines without ill effect. There is no dependence on global
+  shared state.
+
+* **Minimal dependencies**. Blackfriday only depends on standard
+  library packages in Go. The source code is pretty
+  self-contained, so it is easy to add to any project, including
+  Google App Engine projects.
+
+* **Standards compliant**. Output successfully validates using the
+  W3C validation tool for HTML 4.01 and XHTML 1.0 Transitional.
+
+
+Extensions
+----------
+
+In addition to the standard markdown syntax, this package
+implements the following extensions:
+
+* **Intra-word emphasis suppression**. The `_` character is
+  commonly used inside words when discussing code, so having
+  markdown interpret it as an emphasis command is usually the
+  wrong thing. Blackfriday lets you treat all emphasis markers as
+  normal characters when they occur inside a word.
+
+* **Tables**. Tables can be created by drawing them in the input
+  using a simple syntax:
+
+  ```
+  Name    | Age
+  --------|------
+  Bob     | 27
+  Alice   | 23
+  ```
+
+* **Fenced code blocks**. In addition to the normal 4-space
+  indentation to mark code blocks, you can explicitly mark them
+  and supply a language (to make syntax highlighting simple). Just
+  mark it like this:
+
+  ``` go
+  func getTrue() bool {
+      return true
+  }
+  ```
+
+  You can use 3 or more backticks to mark the beginning of the
+  block, and the same number to mark the end of the block.
+
+  To preserve classes of fenced code blocks while using the bluemonday
+  HTML sanitizer, use the following policy:
+
+  ``` go
+  p := bluemonday.UGCPolicy()
+  p.AllowAttrs("class").Matching(regexp.MustCompile("^language-[a-zA-Z0-9]+$")).OnElements("code")
+  html := p.SanitizeBytes(unsafe)
+  ```
+
+* **Definition lists**. A simple definition list is made of a single-line
+  term followed by a colon and the definition for that term.
+
+      Cat
+      : Fluffy animal everyone likes
+
+      Internet
+      : Vector of transmission for pictures of cats
+
+  Terms must be separated from the previous definition by a blank line.
+
+* **Footnotes**. A marker in the text that will become a superscript number;
+  a footnote definition that will be placed in a list of footnotes at the
+  end of the document. A footnote looks like this:
+
+      This is a footnote.[^1]
+
+      [^1]: the footnote text.
+
+* **Autolinking**. Blackfriday can find URLs that have not been
+  explicitly marked as links and turn them into links.
+
+* **Strikethrough**. Use two tildes (`~~`) to mark text that
+  should be crossed out.
+
+* **Hard line breaks**. With this extension enabled (it is off by
+  default in the `MarkdownBasic` and `MarkdownCommon` convenience
+  functions), newlines in the input translate into line breaks in
+  the output.
+
+* **Smart quotes**. Smartypants-style punctuation substitution is
+  supported, turning normal double- and single-quote marks into
+  curly quotes, etc.
+
+* **LaTeX-style dash parsing** is an additional option, where `--`
+  is translated into `&ndash;`, and `---` is translated into
+  `&mdash;`. This differs from most smartypants processors, which
+  turn a single hyphen into an ndash and a double hyphen into an
+  mdash.
+
+* **Smart fractions**, where anything that looks like a fraction
+  is translated into suitable HTML (instead of just a few special
+  cases like most smartypant processors). For example, `4/5`
+  becomes `<sup>4</sup>&frasl;<sub>5</sub>`, which renders as
+  <sup>4</sup>&frasl;<sub>5</sub>.
+
+
+Other renderers
+---------------
+
+Blackfriday is structured to allow alternative rendering engines. Here
+are a few of note:
+
+* [github_flavored_markdown](https://godoc.org/github.com/shurcooL/github_flavored_markdown):
+  provides a GitHub Flavored Markdown renderer with fenced code block
+  highlighting, clickable heading anchor links.
+
+  It's not customizable, and its goal is to produce HTML output
+  equivalent to the [GitHub Markdown API endpoint](https://developer.github.com/v3/markdown/#render-a-markdown-document-in-raw-mode),
+  except the rendering is performed locally.
+
+* [markdownfmt](https://github.com/shurcooL/markdownfmt): like gofmt,
+  but for markdown.
+
+* [LaTeX output](https://bitbucket.org/ambrevar/blackfriday-latex):
+  renders output as LaTeX.
+
+* [bfchroma](https://github.com/Depado/bfchroma/): provides convenience
+  integration with the [Chroma](https://github.com/alecthomas/chroma) code
+  highlighting library. bfchroma is only compatible with v2 of Blackfriday and
+  provides a drop-in renderer ready to use with Blackfriday, as well as
+  options and means for further customization.
+
+
+TODO
+----
+
+* More unit testing
+* Improve Unicode support. It does not understand all Unicode
+  rules (about what constitutes a letter, a punctuation symbol,
+  etc.), so it may fail to detect word boundaries correctly in
+  some instances. It is safe on all UTF-8 input.
+ + +License +------- + +[Blackfriday is distributed under the Simplified BSD License](LICENSE.txt) + + + [1]: https://daringfireball.net/projects/markdown/ "Markdown" + [2]: https://golang.org/ "Go Language" + [3]: https://github.com/vmg/sundown "Sundown" + [4]: https://godoc.org/gopkg.in/russross/blackfriday.v2#Parse "Parse func" + [5]: https://github.com/microcosm-cc/bluemonday "Bluemonday" + [6]: https://labix.org/gopkg.in "gopkg.in" + [7]: https://github.com/golang/dep/ "dep" + [8]: https://github.com/Masterminds/glide "Glide" + + [BuildSVG]: https://travis-ci.org/russross/blackfriday.svg?branch=master + [BuildURL]: https://travis-ci.org/russross/blackfriday + [GodocV2SVG]: https://godoc.org/gopkg.in/russross/blackfriday.v2?status.svg + [GodocV2URL]: https://godoc.org/gopkg.in/russross/blackfriday.v2 diff --git a/vendor/github.com/russross/blackfriday/block.go b/vendor/github.com/russross/blackfriday/block.go new file mode 100644 index 0000000000..45c21a6c26 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/block.go @@ -0,0 +1,1474 @@ +// +// Blackfriday Markdown Processor +// Available at http://github.com/russross/blackfriday +// +// Copyright © 2011 Russ Ross . +// Distributed under the Simplified BSD License. +// See README.md for details. +// + +// +// Functions to parse block-level elements. +// + +package blackfriday + +import ( + "bytes" + "strings" + "unicode" +) + +// Parse block-level data. +// Note: this function and many that it calls assume that +// the input buffer ends with a newline. +func (p *parser) block(out *bytes.Buffer, data []byte) { + if len(data) == 0 || data[len(data)-1] != '\n' { + panic("block input is missing terminating newline") + } + + // this is called recursively: enforce a maximum depth + if p.nesting >= p.maxNesting { + return + } + p.nesting++ + + // parse out one block-level construct at a time + for len(data) > 0 { + // prefixed header: + // + // # Header 1 + // ## Header 2 + // ... + // ###### Header 6 + if p.isPrefixHeader(data) { + data = data[p.prefixHeader(out, data):] + continue + } + + // block of preformatted HTML: + // + //
<div>
+	//     ...
+	// </div>
+ if data[0] == '<' { + if i := p.html(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // title block + // + // % stuff + // % more stuff + // % even more stuff + if p.flags&EXTENSION_TITLEBLOCK != 0 { + if data[0] == '%' { + if i := p.titleBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + } + + // blank lines. note: returns the # of bytes to skip + if i := p.isEmpty(data); i > 0 { + data = data[i:] + continue + } + + // indented code block: + // + // func max(a, b int) int { + // if a > b { + // return a + // } + // return b + // } + if p.codePrefix(data) > 0 { + data = data[p.code(out, data):] + continue + } + + // fenced code block: + // + // ``` go info string here + // func fact(n int) int { + // if n <= 1 { + // return n + // } + // return n * fact(n-1) + // } + // ``` + if p.flags&EXTENSION_FENCED_CODE != 0 { + if i := p.fencedCodeBlock(out, data, true); i > 0 { + data = data[i:] + continue + } + } + + // horizontal rule: + // + // ------ + // or + // ****** + // or + // ______ + if p.isHRule(data) { + p.r.HRule(out) + var i int + for i = 0; data[i] != '\n'; i++ { + } + data = data[i:] + continue + } + + // block quote: + // + // > A big quote I found somewhere + // > on the web + if p.quotePrefix(data) > 0 { + data = data[p.quote(out, data):] + continue + } + + // table: + // + // Name | Age | Phone + // ------|-----|--------- + // Bob | 31 | 555-1234 + // Alice | 27 | 555-4321 + if p.flags&EXTENSION_TABLES != 0 { + if i := p.table(out, data); i > 0 { + data = data[i:] + continue + } + } + + // an itemized/unordered list: + // + // * Item 1 + // * Item 2 + // + // also works with + or - + if p.uliPrefix(data) > 0 { + data = data[p.list(out, data, 0):] + continue + } + + // a numbered/ordered list: + // + // 1. Item 1 + // 2. 
Item 2 + if p.oliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_ORDERED):] + continue + } + + // definition lists: + // + // Term 1 + // : Definition a + // : Definition b + // + // Term 2 + // : Definition c + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(data) > 0 { + data = data[p.list(out, data, LIST_TYPE_DEFINITION):] + continue + } + } + + // anything else must look like a normal paragraph + // note: this finds underlined headers, too + data = data[p.paragraph(out, data):] + } + + p.nesting-- +} + +func (p *parser) isPrefixHeader(data []byte) bool { + if data[0] != '#' { + return false + } + + if p.flags&EXTENSION_SPACE_HEADERS != 0 { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + if data[level] != ' ' { + return false + } + } + return true +} + +func (p *parser) prefixHeader(out *bytes.Buffer, data []byte) int { + level := 0 + for level < 6 && data[level] == '#' { + level++ + } + i := skipChar(data, level, ' ') + end := skipUntilChar(data, i, '\n') + skip := end + id := "" + if p.flags&EXTENSION_HEADER_IDS != 0 { + j, k := 0, 0 + // find start/end of header id + for j = i; j < end-1 && (data[j] != '{' || data[j+1] != '#'); j++ { + } + for k = j + 1; k < end && data[k] != '}'; k++ { + } + // extract header id iff found + if j < end && k < end { + id = string(data[j+2 : k]) + end = j + skip = k + 1 + for end > 0 && data[end-1] == ' ' { + end-- + } + } + } + for end > 0 && data[end-1] == '#' { + if isBackslashEscaped(data, end-1) { + break + } + end-- + } + for end > 0 && data[end-1] == ' ' { + end-- + } + if end > i { + if id == "" && p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[i:end])) + } + work := func() bool { + p.inline(out, data[i:end]) + return true + } + p.r.Header(out, work, level, id) + } + return skip +} + +func (p *parser) isUnderlinedHeader(data []byte) int { + // test of level 1 header + if data[0] == '=' { + i := skipChar(data, 1, '=') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 1 + } else { + return 0 + } + } + + // test of level 2 header + if data[0] == '-' { + i := skipChar(data, 1, '-') + i = skipChar(data, i, ' ') + if data[i] == '\n' { + return 2 + } else { + return 0 + } + } + + return 0 +} + +func (p *parser) titleBlock(out *bytes.Buffer, data []byte, doRender bool) int { + if data[0] != '%' { + return 0 + } + splitData := bytes.Split(data, []byte("\n")) + var i int + for idx, b := range splitData { + if !bytes.HasPrefix(b, []byte("%")) { + i = idx // - 1 + break + } + } + + data = bytes.Join(splitData[0:i], []byte("\n")) + p.r.TitleBlock(out, data) + + return len(data) +} + +func (p *parser) html(out *bytes.Buffer, data []byte, doRender bool) int { + var i, j int + + // identify the opening tag + if data[0] != '<' { + return 0 + } + curtag, tagfound := p.htmlFindTag(data[1:]) + + // handle special cases + if !tagfound { + // check for an HTML comment + if size := p.htmlComment(out, data, doRender); size > 0 { + return size + } + + // check for an
<hr> tag
+		if size := p.htmlHr(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// check for HTML CDATA
+		if size := p.htmlCDATA(out, data, doRender); size > 0 {
+			return size
+		}
+
+		// no special case recognized
+		return 0
+	}
+
+	// look for an unindented matching closing tag
+	// followed by a blank line
+	found := false
+	/*
+		closetag := []byte("\n</" + curtag + ">")
+		j = len(curtag) + 1
+		for !found {
+			// scan for a closing tag at the beginning of a line
+			if skip := bytes.Index(data[j:], closetag); skip >= 0 {
+				j += skip + len(closetag)
+			} else {
+				break
+			}
+
+			// see if it is the only thing on the line
+			if skip := p.isEmpty(data[j:]); skip > 0 {
+				// see if it is followed by a blank line/eof
+				j += skip
+				if j >= len(data) {
+					found = true
+					i = j
+				} else {
+					if skip := p.isEmpty(data[j:]); skip > 0 {
+						j += skip
+						found = true
+						i = j
+					}
+				}
+			}
+		}
+	*/
+
+	// if not found, try a second pass looking for indented match
+	// but not if tag is "ins" or "del" (following original Markdown.pl)
+	if !found && curtag != "ins" && curtag != "del" {
+		i = 1
+		for i < len(data) {
+			i++
+			for i < len(data) && !(data[i-1] == '<' && data[i] == '/') {
+				i++
+			}
+
+			if i+2+len(curtag) >= len(data) {
+				break
+			}
+
+			j = p.htmlFindEnd(curtag, data[i-1:])
+
+			if j > 0 {
+				i += j - 1
+				found = true
+				break
+			}
+		}
+	}
+
+	if !found {
+		return 0
+	}
+
+	// the end of the block has been found
+	if doRender {
+		// trim newlines
+		end := i
+		for end > 0 && data[end-1] == '\n' {
+			end--
+		}
+		p.r.BlockHtml(out, data[:end])
+	}
+
+	return i
+}
+
+func (p *parser) renderHTMLBlock(out *bytes.Buffer, data []byte, start int, doRender bool) int {
+	// html block needs to end with a blank line
+	if i := p.isEmpty(data[start:]); i > 0 {
+		size := start + i
+		if doRender {
+			// trim trailing newlines
+			end := size
+			for end > 0 && data[end-1] == '\n' {
+				end--
+			}
+			p.r.BlockHtml(out, data[:end])
+		}
+		return size
+	}
+	return 0
+}
+
+// HTML comment, lax form
+func (p *parser) htmlComment(out *bytes.Buffer, data []byte, doRender bool) int {
+	i := p.inlineHTMLComment(out, data)
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HTML CDATA section
+func (p *parser) htmlCDATA(out *bytes.Buffer, data []byte, doRender bool) int {
+	const cdataTag = "<![cdata["
+	const cdataTagLen = len(cdataTag)
+	if len(data) < cdataTagLen+1 {
+		return 0
+	}
+	if !bytes.Equal(bytes.ToLower(data[:cdataTagLen]), []byte(cdataTag)) {
+		return 0
+	}
+	i := cdataTagLen
+	// scan for an end-of-comment marker, across lines if necessary
+	for i < len(data) && !(data[i-1] == ']' && data[i] == ']' && data[i+1] == '>') {
+		i++
+	}
+	i++
+	// no end-of-comment marker
+	if i >= len(data) {
+		return 0
+	}
+	return p.renderHTMLBlock(out, data, i, doRender)
+}
+
+// HR, which is the only self-closing block tag considered
+func (p *parser) htmlHr(out *bytes.Buffer, data []byte, doRender bool) int {
+	if data[0] != '<' || (data[1] != 'h' && data[1] != 'H') || (data[2] != 'r' && data[2] != 'R') {
+		return 0
+	}
+	if data[3] != ' ' && data[3] != '/' && data[3] != '>' {
+		// not an <hr> tag after all; at least not a valid one
+		return 0
+	}
+
+	i := 3
+	for data[i] != '>' && data[i] != '\n' {
+		i++
+	}
+
+	if data[i] == '>' {
+		return p.renderHTMLBlock(out, data, i+1, doRender)
+	}
+
+	return 0
+}
+
+func (p *parser) htmlFindTag(data []byte) (string, bool) {
+	i := 0
+	for isalnum(data[i]) {
+		i++
+	}
+	key := string(data[:i])
+	if _, ok := blockTags[key]; ok {
+		return key, true
+	}
+	return "", false
+}
+
+func (p *parser) htmlFindEnd(tag string, data []byte) int {
+	// assume data[0] == '<' && data[1] == '/' already tested
+
+	// check if tag is a match
+	closetag := []byte("</" + tag + ">")
+	if !bytes.HasPrefix(data, closetag) {
+		return 0
+	}
+	i := len(closetag)
+
+	// check that the rest of the line is blank
+	skip := 0
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		return 0
+	}
+	i += skip
+	skip = 0
+
+	if i >= len(data) {
+		return i
+	}
+
+	if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 {
+		return i
+	}
+	if skip = p.isEmpty(data[i:]); skip == 0 {
+		// following line must be blank
+		return 0
+	}
+
+	return i + skip
+}
+
+func (*parser) isEmpty(data []byte) int {
+	// it is okay to call isEmpty on an empty buffer
+	if len(data) == 0 {
+		return 0
+	}
+
+	var i int
+	for i = 0; i < len(data) && data[i] != '\n'; i++ {
+		if data[i] != ' ' && data[i] != '\t' {
+			return 0
+		}
+	}
+	return i + 1
+}
+
+func (*parser) isHRule(data []byte) bool {
+	i := 0
+
+	// skip up to three spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// look at the hrule char
+	if data[i] != '*' && data[i] != '-' && data[i] != '_' {
+		return false
+	}
+	c := data[i]
+
+	// the whole line must be the char or whitespace
+	n := 0
+	for data[i] != '\n' {
+		switch {
+		case data[i] == c:
+			n++
+		case data[i] != ' ':
+			return false
+		}
+		i++
+	}
+
+	return n >= 3
+}
+
+// isFenceLine checks if there's a fence line (e.g., ``` or ``` go) at the beginning of data,
+// and returns the end index if so, or 0 otherwise. It also returns the marker found.
+// If info is not nil, it gets set to the syntax specified in the fence line.
+// A final newline is mandatory to recognize the fence line, unless newlineOptional is true.
+func isFenceLine(data []byte, info *string, oldmarker string, newlineOptional bool) (end int, marker string) {
+	i, size := 0, 0
+
+	// skip up to three spaces
+	for i < len(data) && i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// check for the marker characters: ~ or `
+	if i >= len(data) {
+		return 0, ""
+	}
+	if data[i] != '~' && data[i] != '`' {
+		return 0, ""
+	}
+
+	c := data[i]
+
+	// the whole line must be the same char or whitespace
+	for i < len(data) && data[i] == c {
+		size++
+		i++
+	}
+
+	// the marker char must occur at least 3 times
+	if size < 3 {
+		return 0, ""
+	}
+	marker = string(data[i-size : i])
+
+	// if this is the end marker, it must match the beginning marker
+	if oldmarker != "" && marker != oldmarker {
+		return 0, ""
+	}
+
+	// TODO(shurcooL): It's probably a good idea to simplify the 2 code paths here
+	// into one, always get the info string, and discard it if the caller doesn't care.
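+	// The info string is either given between braces ({...}) or runs to the
+	// end of the fence line; both paths below strip surrounding whitespace.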
+ if info != nil { + infoLength := 0 + i = skipChar(data, i, ' ') + + if i >= len(data) { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + infoStart := i + + if data[i] == '{' { + i++ + infoStart++ + + for i < len(data) && data[i] != '}' && data[i] != '\n' { + infoLength++ + i++ + } + + if i >= len(data) || data[i] != '}' { + return 0, "" + } + + // strip all whitespace at the beginning and the end + // of the {} block + for infoLength > 0 && isspace(data[infoStart]) { + infoStart++ + infoLength-- + } + + for infoLength > 0 && isspace(data[infoStart+infoLength-1]) { + infoLength-- + } + + i++ + } else { + for i < len(data) && !isverticalspace(data[i]) { + infoLength++ + i++ + } + } + + *info = strings.TrimSpace(string(data[infoStart : infoStart+infoLength])) + } + + i = skipChar(data, i, ' ') + if i >= len(data) || data[i] != '\n' { + if newlineOptional && i == len(data) { + return i, marker + } + return 0, "" + } + + return i + 1, marker // Take newline into account. +} + +// fencedCodeBlock returns the end index if data contains a fenced code block at the beginning, +// or 0 otherwise. It writes to out if doRender is true, otherwise it has no side effects. +// If doRender is true, a final newline is mandatory to recognize the fenced code block. +func (p *parser) fencedCodeBlock(out *bytes.Buffer, data []byte, doRender bool) int { + var infoString string + beg, marker := isFenceLine(data, &infoString, "", false) + if beg == 0 || beg >= len(data) { + return 0 + } + + var work bytes.Buffer + + for { + // safe to assume beg < len(data) + + // check for the end of the code block + newlineOptional := !doRender + fenceEnd, _ := isFenceLine(data[beg:], nil, marker, newlineOptional) + if fenceEnd != 0 { + beg += fenceEnd + break + } + + // copy the current line + end := skipUntilChar(data, beg, '\n') + 1 + + // did we reach the end of the buffer without a closing marker? 
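+		// if so, there is no complete fenced block: report no match and let
+		// the caller fall back to other block handling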
+ if end >= len(data) { + return 0 + } + + // verbatim copy to the working buffer + if doRender { + work.Write(data[beg:end]) + } + beg = end + } + + if doRender { + p.r.BlockCode(out, work.Bytes(), infoString) + } + + return beg +} + +func (p *parser) table(out *bytes.Buffer, data []byte) int { + var header bytes.Buffer + i, columns := p.tableHeader(&header, data) + if i == 0 { + return 0 + } + + var body bytes.Buffer + + for i < len(data) { + pipes, rowStart := 0, i + for ; data[i] != '\n'; i++ { + if data[i] == '|' { + pipes++ + } + } + + if pipes == 0 { + i = rowStart + break + } + + // include the newline in data sent to tableRow + i++ + p.tableRow(&body, data[rowStart:i], columns, false) + } + + p.r.Table(out, header.Bytes(), body.Bytes(), columns) + + return i +} + +// check if the specified position is preceded by an odd number of backslashes +func isBackslashEscaped(data []byte, i int) bool { + backslashes := 0 + for i-backslashes-1 >= 0 && data[i-backslashes-1] == '\\' { + backslashes++ + } + return backslashes&1 == 1 +} + +func (p *parser) tableHeader(out *bytes.Buffer, data []byte) (size int, columns []int) { + i := 0 + colCount := 1 + for i = 0; data[i] != '\n'; i++ { + if data[i] == '|' && !isBackslashEscaped(data, i) { + colCount++ + } + } + + // doesn't look like a table header + if colCount == 1 { + return + } + + // include the newline in the data sent to tableRow + header := data[:i+1] + + // column count ignores pipes at beginning or end of line + if data[0] == '|' { + colCount-- + } + if i > 2 && data[i-1] == '|' && !isBackslashEscaped(data, i-1) { + colCount-- + } + + columns = make([]int, colCount) + + // move on to the header underline + i++ + if i >= len(data) { + return + } + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + i = skipChar(data, i, ' ') + + // each column header is of form: / *:?-+:? 
*|/ with # dashes + # colons >= 3 + // and trailing | optional on last column + col := 0 + for data[i] != '\n' { + dashes := 0 + + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_LEFT + dashes++ + } + for data[i] == '-' { + i++ + dashes++ + } + if data[i] == ':' { + i++ + columns[col] |= TABLE_ALIGNMENT_RIGHT + dashes++ + } + for data[i] == ' ' { + i++ + } + + // end of column test is messy + switch { + case dashes < 3: + // not a valid column + return + + case data[i] == '|' && !isBackslashEscaped(data, i): + // marker found, now skip past trailing whitespace + col++ + i++ + for data[i] == ' ' { + i++ + } + + // trailing junk found after last column + if col >= colCount && data[i] != '\n' { + return + } + + case (data[i] != '|' || isBackslashEscaped(data, i)) && col+1 < colCount: + // something else found where marker was required + return + + case data[i] == '\n': + // marker is optional for the last column + col++ + + default: + // trailing junk found after last column + return + } + } + if col != colCount { + return + } + + p.tableRow(out, header, columns, true) + size = i + 1 + return +} + +func (p *parser) tableRow(out *bytes.Buffer, data []byte, columns []int, header bool) { + i, col := 0, 0 + var rowWork bytes.Buffer + + if data[i] == '|' && !isBackslashEscaped(data, i) { + i++ + } + + for col = 0; col < len(columns) && i < len(data); col++ { + for data[i] == ' ' { + i++ + } + + cellStart := i + + for (data[i] != '|' || isBackslashEscaped(data, i)) && data[i] != '\n' { + i++ + } + + cellEnd := i + + // skip the end-of-cell marker, possibly taking us past end of buffer + i++ + + for cellEnd > cellStart && data[cellEnd-1] == ' ' { + cellEnd-- + } + + var cellWork bytes.Buffer + p.inline(&cellWork, data[cellStart:cellEnd]) + + if header { + p.r.TableHeaderCell(&rowWork, cellWork.Bytes(), columns[col]) + } else { + p.r.TableCell(&rowWork, cellWork.Bytes(), columns[col]) + } + } + + // pad it out with empty columns to get the right number + for ; col < len(columns); col++ { + if header { + p.r.TableHeaderCell(&rowWork, nil, columns[col]) + } else { + p.r.TableCell(&rowWork, nil, columns[col]) + } + } + + // silently ignore rows with too many cells + + p.r.TableRow(out, rowWork.Bytes()) +} + +// returns blockquote prefix length +func (p *parser) quotePrefix(data []byte) int { + i := 0 + for i < 3 && data[i] == ' ' { + i++ + } + if data[i] == '>' { + if data[i+1] == ' ' { + return i + 2 + } + return i + 1 + } + return 0 +} + +// blockquote ends with at least one blank line +// followed by something without a blockquote prefix +func (p *parser) terminateBlockquote(data []byte, beg, end int) bool { + if p.isEmpty(data[beg:]) <= 0 { + return false + } + if end >= len(data) { + return true + } + return p.quotePrefix(data[end:]) == 0 && p.isEmpty(data[end:]) == 0 +} + +// parse a blockquote fragment +func (p *parser) quote(out *bytes.Buffer, data []byte) int { + var raw bytes.Buffer + beg, end := 0, 0 + for beg < len(data) { + end = beg + // Step over whole lines, collecting them. 
While doing that, check for
+		// fenced code and if one's found, incorporate it altogether,
+		// regardless of any contents inside it
+		for data[end] != '\n' {
+			if p.flags&EXTENSION_FENCED_CODE != 0 {
+				if i := p.fencedCodeBlock(out, data[end:], false); i > 0 {
+					// -1 to compensate for the extra end++ after the loop:
+					end += i - 1
+					break
+				}
+			}
+			end++
+		}
+		end++
+
+		if pre := p.quotePrefix(data[beg:]); pre > 0 {
+			// skip the prefix
+			beg += pre
+		} else if p.terminateBlockquote(data, beg, end) {
+			break
+		}
+
+		// this line is part of the blockquote
+		raw.Write(data[beg:end])
+		beg = end
+	}
+
+	var cooked bytes.Buffer
+	p.block(&cooked, raw.Bytes())
+	p.r.BlockQuote(out, cooked.Bytes())
+	return end
+}
+
+// returns prefix length for block code
+func (p *parser) codePrefix(data []byte) int {
+	if data[0] == ' ' && data[1] == ' ' && data[2] == ' ' && data[3] == ' ' {
+		return 4
+	}
+	return 0
+}
+
+func (p *parser) code(out *bytes.Buffer, data []byte) int {
+	var work bytes.Buffer
+
+	i := 0
+	for i < len(data) {
+		beg := i
+		for data[i] != '\n' {
+			i++
+		}
+		i++
+
+		blankline := p.isEmpty(data[beg:i]) > 0
+		if pre := p.codePrefix(data[beg:i]); pre > 0 {
+			beg += pre
+		} else if !blankline {
+			// non-empty, non-prefixed line breaks the pre
+			i = beg
+			break
+		}
+
+		// verbatim copy to the working buffer
+		if blankline {
+			work.WriteByte('\n')
+		} else {
+			work.Write(data[beg:i])
+		}
+	}
+
+	// trim all the \n off the end of work
+	workbytes := work.Bytes()
+	eol := len(workbytes)
+	for eol > 0 && workbytes[eol-1] == '\n' {
+		eol--
+	}
+	if eol != len(workbytes) {
+		work.Truncate(eol)
+	}
+
+	work.WriteByte('\n')
+
+	p.r.BlockCode(out, work.Bytes(), "")
+
+	return i
+}
+
+// returns unordered list item prefix
+func (p *parser) uliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// need a *, +, or - followed by a space
+	if (data[i] != '*' && data[i] != '+' && data[i] != '-') ||
+		data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns ordered list item prefix
+func (p *parser) oliPrefix(data []byte) int {
+	i := 0
+
+	// start with up to 3 spaces
+	for i < 3 && data[i] == ' ' {
+		i++
+	}
+
+	// count the digits
+	start := i
+	for data[i] >= '0' && data[i] <= '9' {
+		i++
+	}
+
+	// we need >= 1 digits followed by a dot and a space
+	if start == i || data[i] != '.' || data[i+1] != ' ' {
+		return 0
+	}
+	return i + 2
+}
+
+// returns definition list item prefix
+func (p *parser) dliPrefix(data []byte) int {
+	i := 0
+
+	// need a : followed by a space
+	if data[i] != ':' || data[i+1] != ' ' {
+		return 0
+	}
+	for data[i] == ' ' {
+		i++
+	}
+	return i + 2
+}
+
+// parse ordered or unordered list block
+func (p *parser) list(out *bytes.Buffer, data []byte, flags int) int {
+	i := 0
+	flags |= LIST_ITEM_BEGINNING_OF_LIST
+	work := func() bool {
+		for i < len(data) {
+			skip := p.listItem(out, data[i:], &flags)
+			i += skip
+
+			if skip == 0 || flags&LIST_ITEM_END_OF_LIST != 0 {
+				break
+			}
+			flags &= ^LIST_ITEM_BEGINNING_OF_LIST
+		}
+		return true
+	}
+
+	p.r.List(out, work, flags)
+	return i
+}
+
+// Parse a single list item.
+// Assumes initial prefix is already removed if this is a sublist.
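+// It returns the number of bytes consumed, or 0 if data does not start
+// with a recognizable list item.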
+func (p *parser) listItem(out *bytes.Buffer, data []byte, flags *int) int { + // keep track of the indentation of the first line + itemIndent := 0 + for itemIndent < 3 && data[itemIndent] == ' ' { + itemIndent++ + } + + i := p.uliPrefix(data) + if i == 0 { + i = p.oliPrefix(data) + } + if i == 0 { + i = p.dliPrefix(data) + // reset definition term flag + if i > 0 { + *flags &= ^LIST_TYPE_TERM + } + } + if i == 0 { + // if in defnition list, set term flag and continue + if *flags&LIST_TYPE_DEFINITION != 0 { + *flags |= LIST_TYPE_TERM + } else { + return 0 + } + } + + // skip leading whitespace on first line + for data[i] == ' ' { + i++ + } + + // find the end of the line + line := i + for i > 0 && data[i-1] != '\n' { + i++ + } + + // get working buffer + var raw bytes.Buffer + + // put the first line into the working buffer + raw.Write(data[line:i]) + line = i + + // process the following lines + containsBlankLine := false + sublist := 0 + codeBlockMarker := "" + +gatherlines: + for line < len(data) { + i++ + + // find the end of this line + for data[i-1] != '\n' { + i++ + } + + // if it is an empty line, guess that it is part of this item + // and move on to the next line + if p.isEmpty(data[line:i]) > 0 { + containsBlankLine = true + raw.Write(data[line:i]) + line = i + continue + } + + // calculate the indentation + indent := 0 + for indent < 4 && line+indent < i && data[line+indent] == ' ' { + indent++ + } + + chunk := data[line+indent : i] + + if p.flags&EXTENSION_FENCED_CODE != 0 { + // determine if in or out of codeblock + // if in codeblock, ignore normal list processing + _, marker := isFenceLine(chunk, nil, codeBlockMarker, false) + if marker != "" { + if codeBlockMarker == "" { + // start of codeblock + codeBlockMarker = marker + } else { + // end of codeblock. + *flags |= LIST_ITEM_CONTAINS_BLOCK + codeBlockMarker = "" + } + } + // we are in a codeblock, write line, and continue + if codeBlockMarker != "" || marker != "" { + raw.Write(data[line+indent : i]) + line = i + continue gatherlines + } + } + + // evaluate how this line fits in + switch { + // is this a nested list item? + case (p.uliPrefix(chunk) > 0 && !p.isHRule(chunk)) || + p.oliPrefix(chunk) > 0 || + p.dliPrefix(chunk) > 0: + + if containsBlankLine { + // end the list if the type changed after a blank line + if indent <= itemIndent && + ((*flags&LIST_TYPE_ORDERED != 0 && p.uliPrefix(chunk) > 0) || + (*flags&LIST_TYPE_ORDERED == 0 && p.oliPrefix(chunk) > 0)) { + + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + // to be a nested list, it must be indented more + // if not, it is the next item in the same list + if indent <= itemIndent { + break gatherlines + } + + // is this the first item in the nested list? + if sublist == 0 { + sublist = raw.Len() + } + + // is this a nested prefix header? + case p.isPrefixHeader(chunk): + // if the header is not indented, it is not nested in the list + // and thus ends the list + if containsBlankLine && indent < 4 { + *flags |= LIST_ITEM_END_OF_LIST + break gatherlines + } + *flags |= LIST_ITEM_CONTAINS_BLOCK + + // anything following an empty line is only part + // of this item if it is indented 4 spaces + // (regardless of the indentation of the beginning of the item) + case containsBlankLine && indent < 4: + if *flags&LIST_TYPE_DEFINITION != 0 && i < len(data)-1 { + // is the next item still a part of this list? 
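+				// peek at the first non-blank line that follows: the list
+				// continues only if it (or the current line) starts with ':'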
+ next := i + for data[next] != '\n' { + next++ + } + for next < len(data)-1 && data[next] == '\n' { + next++ + } + if i < len(data)-1 && data[i] != ':' && data[next] != ':' { + *flags |= LIST_ITEM_END_OF_LIST + } + } else { + *flags |= LIST_ITEM_END_OF_LIST + } + break gatherlines + + // a blank line means this should be parsed as a block + case containsBlankLine: + *flags |= LIST_ITEM_CONTAINS_BLOCK + } + + containsBlankLine = false + + // add the line into the working buffer without prefix + raw.Write(data[line+indent : i]) + + line = i + } + + // If reached end of data, the Renderer.ListItem call we're going to make below + // is definitely the last in the list. + if line >= len(data) { + *flags |= LIST_ITEM_END_OF_LIST + } + + rawBytes := raw.Bytes() + + // render the contents of the list item + var cooked bytes.Buffer + if *flags&LIST_ITEM_CONTAINS_BLOCK != 0 && *flags&LIST_TYPE_TERM == 0 { + // intermediate render of block item, except for definition term + if sublist > 0 { + p.block(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.block(&cooked, rawBytes) + } + } else { + // intermediate render of inline item + if sublist > 0 { + p.inline(&cooked, rawBytes[:sublist]) + p.block(&cooked, rawBytes[sublist:]) + } else { + p.inline(&cooked, rawBytes) + } + } + + // render the actual list item + cookedBytes := cooked.Bytes() + parsedEnd := len(cookedBytes) + + // strip trailing newlines + for parsedEnd > 0 && cookedBytes[parsedEnd-1] == '\n' { + parsedEnd-- + } + p.r.ListItem(out, cookedBytes[:parsedEnd], *flags) + + return line +} + +// render a single paragraph that has already been parsed out +func (p *parser) renderParagraph(out *bytes.Buffer, data []byte) { + if len(data) == 0 { + return + } + + // trim leading spaces + beg := 0 + for data[beg] == ' ' { + beg++ + } + + // trim trailing newline + end := len(data) - 1 + + // trim trailing spaces + for end > beg && data[end-1] == ' ' { + end-- + } + + work := func() bool { + p.inline(out, data[beg:end]) + return true + } + p.r.Paragraph(out, work) +} + +func (p *parser) paragraph(out *bytes.Buffer, data []byte) int { + // prev: index of 1st char of previous line + // line: index of 1st char of current line + // i: index of cursor/end of current line + var prev, line, i int + + // keep going until we find something to mark the end of the paragraph + for i < len(data) { + // mark the beginning of the current line + prev = line + current := data[i:] + line = i + + // did we find a blank line marking the end of the paragraph? + if n := p.isEmpty(current); n > 0 { + // did this blank line followed by a definition list item? 
+ if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if i < len(data)-1 && data[i+1] == ':' { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + p.renderParagraph(out, data[:i]) + return i + n + } + + // an underline under some text marks a header, so our paragraph ended on prev line + if i > 0 { + if level := p.isUnderlinedHeader(current); level > 0 { + // render the paragraph + p.renderParagraph(out, data[:prev]) + + // ignore leading and trailing whitespace + eol := i - 1 + for prev < eol && data[prev] == ' ' { + prev++ + } + for eol > prev && data[eol-1] == ' ' { + eol-- + } + + // render the header + // this ugly double closure avoids forcing variables onto the heap + work := func(o *bytes.Buffer, pp *parser, d []byte) func() bool { + return func() bool { + pp.inline(o, d) + return true + } + }(out, p, data[prev:eol]) + + id := "" + if p.flags&EXTENSION_AUTO_HEADER_IDS != 0 { + id = SanitizedAnchorName(string(data[prev:eol])) + } + + p.r.Header(out, work, level, id) + + // find the end of the underline + for data[i] != '\n' { + i++ + } + return i + } + } + + // if the next line starts a block of HTML, then the paragraph ends here + if p.flags&EXTENSION_LAX_HTML_BLOCKS != 0 { + if data[i] == '<' && p.html(out, current, false) > 0 { + // rewind to before the HTML block + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a prefixed header or a horizontal rule after this, paragraph is over + if p.isPrefixHeader(current) || p.isHRule(current) { + p.renderParagraph(out, data[:i]) + return i + } + + // if there's a fenced code block, paragraph is over + if p.flags&EXTENSION_FENCED_CODE != 0 { + if p.fencedCodeBlock(out, current, false) > 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // if there's a definition list item, prev line is a definition term + if p.flags&EXTENSION_DEFINITION_LISTS != 0 { + if p.dliPrefix(current) != 0 { + return p.list(out, data[prev:], LIST_TYPE_DEFINITION) + } + } + + // if there's a list after this, paragraph is over + if p.flags&EXTENSION_NO_EMPTY_LINE_BEFORE_BLOCK != 0 { + if p.uliPrefix(current) != 0 || + p.oliPrefix(current) != 0 || + p.quotePrefix(current) != 0 || + p.codePrefix(current) != 0 { + p.renderParagraph(out, data[:i]) + return i + } + } + + // otherwise, scan to the beginning of the next line + for data[i] != '\n' { + i++ + } + i++ + } + + p.renderParagraph(out, data[:i]) + return i +} + +// SanitizedAnchorName returns a sanitized anchor name for the given text. +// +// It implements the algorithm specified in the package comment. +func SanitizedAnchorName(text string) string { + var anchorName []rune + futureDash := false + for _, r := range text { + switch { + case unicode.IsLetter(r) || unicode.IsNumber(r): + if futureDash && len(anchorName) > 0 { + anchorName = append(anchorName, '-') + } + futureDash = false + anchorName = append(anchorName, unicode.ToLower(r)) + default: + futureDash = true + } + } + return string(anchorName) +} diff --git a/vendor/github.com/russross/blackfriday/doc.go b/vendor/github.com/russross/blackfriday/doc.go new file mode 100644 index 0000000000..9656c42a19 --- /dev/null +++ b/vendor/github.com/russross/blackfriday/doc.go @@ -0,0 +1,32 @@ +// Package blackfriday is a Markdown processor. +// +// It translates plain text with simple formatting rules into HTML or LaTeX. +// +// Sanitized Anchor Names +// +// Blackfriday includes an algorithm for creating sanitized anchor names +// corresponding to a given input text. 
This algorithm is used to create
+// anchors for headings when EXTENSION_AUTO_HEADER_IDS is enabled. The
+// algorithm is specified below, so that other packages can create
+// compatible anchor names and links to those anchors.
+//
+// The algorithm iterates over the input text, interpreted as UTF-8,
+// one Unicode code point (rune) at a time. All runes that are letters (category L)
+// or numbers (category N) are considered valid characters. They are mapped to
+// lower case, and included in the output. All other runes are considered
+// invalid characters. Invalid characters that precede the first valid character,
+// as well as invalid characters that follow the last valid character
+// are dropped completely. All other sequences of invalid characters
+// between two valid characters are replaced with a single dash character '-'.
+//
+// SanitizedAnchorName exposes this functionality, and can be used to
+// create compatible links to the anchor names generated by blackfriday.
+// This algorithm is also implemented in a small standalone package at
+// github.com/shurcooL/sanitized_anchor_name. It can be useful for clients
+// that want a small package and don't need full functionality of blackfriday.
+package blackfriday
+
+// NOTE: Keep Sanitized Anchor Name algorithm in sync with package
+// github.com/shurcooL/sanitized_anchor_name.
+// Otherwise, users of sanitized_anchor_name will get anchor names
+// that are incompatible with those generated by blackfriday.
diff --git a/vendor/github.com/russross/blackfriday/html.go b/vendor/github.com/russross/blackfriday/html.go
new file mode 100644
index 0000000000..e0a6c69c96
--- /dev/null
+++ b/vendor/github.com/russross/blackfriday/html.go
@@ -0,0 +1,938 @@
+//
+// Blackfriday Markdown Processor
+// Available at http://github.com/russross/blackfriday
+//
+// Copyright © 2011 Russ Ross <russ@russross.com>.
+// Distributed under the Simplified BSD License.
+// See README.md for details.
+//
+
+//
+//
+// HTML rendering backend
+//
+//
+
+package blackfriday
+
+import (
+	"bytes"
+	"fmt"
+	"regexp"
+	"strconv"
+	"strings"
+)
+
+// Html renderer configuration options.
+const (
+	HTML_SKIP_HTML  = 1 << iota // skip preformatted HTML blocks
+	HTML_SKIP_STYLE             // skip embedded <style>