diff --git a/Makefile b/Makefile index 2ec711e01..e914bd71b 100644 --- a/Makefile +++ b/Makefile @@ -45,9 +45,20 @@ build: install: go install -tags "$(TAGS)" $(PKGS) +proto: + go get -v go.pedge.io/protoeasy/cmd/protoeasy + go get -v go.pedge.io/pkg/cmd/strip-package-comments + protoeasy --exclude vendor --go --go-import-path github.com/libopenstorage/openstorage . + find . -name *\.pb\*\.go | xargs strip-package-comments + lint: go get -v github.com/golang/lint/golint - $(foreach pkg,$(PKGS),golint $(pkg);) + for file in $$(find . -name '*.go' | grep -v vendor | grep -v '\.pb\.go' | grep -v '\.pb\.gw\.go'); do \ + golint $${file}; \ + if [ -n "$$(golint $${file})" ]; then \ + exit 1; \ + fi; \ + done vet: go vet $(PKGS) @@ -61,10 +72,22 @@ pretest: lint vet errcheck test: go test -tags "$(TAGS)" $(TESTFLAGS) $(PKGS) -docker-build: +docker-build-osd-dev: docker build -t openstorage/osd-dev -f Dockerfile.osd-dev . -docker-test: docker-build +docker-build: docker-build-osd-dev + docker run \ + --privileged \ + -v /var/run/docker.sock:/var/run/docker.sock \ + -e AWS_ACCESS_KEY_ID \ + -e AWS_SECRET_ACCESS_KEY \ + -e "TAGS=$(TAGS)" \ + -e "PKGS=$(PKGS)" \ + -e "BUILDFLAGS=$(BUILDFLAGS)" \ + openstorage/osd-dev \ + make build + +docker-test: docker-build-osd-dev docker run \ --privileged \ -v /var/run/docker.sock:/var/run/docker.sock \ @@ -83,7 +106,7 @@ docker-build-osd-internal: go build -a -tags "$(TAGS)" -o _tmp/osd cmd/osd/main.go docker build -t openstorage/osd -f Dockerfile.osd . 
-docker-build-osd: docker-build +docker-build-osd: docker-build-osd-dev docker run \ -v /var/run/docker.sock:/var/run/docker.sock \ -e "TAGS=$(TAGS)" \ @@ -113,11 +136,13 @@ clean: vendor \ build \ install \ + proto \ lint \ vet \ errcheck \ pretest \ test \ + docker-build-osd-dev \ docker-build \ docker-test \ docker-build-osd-internal \ diff --git a/README.md b/README.md index 94719de43..45510f5da 100644 --- a/README.md +++ b/README.md @@ -242,6 +242,36 @@ WantedBy=multi-user.target The specification and code is licensed under the Apache 2.0 license found in the `LICENSE` file of this repository. +See the [Style Guide](STYLEGUIDE.md). + +### Protoeasy quick start + +https://go.pedge.io/protoeasy + +``` +docker pull quay.io/pedge/protoeasy +``` + +Add to your ~/.bashrc (or equivalent): + +``` +# to use protoeasy for now, you must have docker installed locally or in a vm +# if running docker using docker-machine etc, replace 192.168.10.10 with the ip of the vm +# if running docker locally, replace 192.168.10.10 with 0.0.0.0 +export PROTOEASY_ADDRESS=192.168.10.10:6789 + +launch-protoeasy() { + docker rm -f protoeasy || true + docker run -d -p 6789:6789 --name=protoeasy quay.io/pedge/protoeasy +} +``` + +Then just run `launch-protoeasy` before compiling the protocol buffers files, and then to compile: + +``` +make proto +``` + ### Sign your work The sign-off is a simple line at the end of the explanation for the @@ -296,4 +326,3 @@ then you just add a line to every git commit message: using your real name (sorry, no pseudonyms or anonymous contributions.) You can add the sign off when creating the git commit via `git commit -s`. - diff --git a/STYLEGUIDE.md b/STYLEGUIDE.md new file mode 100644 index 000000000..60299d314 --- /dev/null +++ b/STYLEGUIDE.md @@ -0,0 +1,195 @@ +# Style Guide + +This is the official openstorage style guide for golang code. This is in addition to the official style guide at https://github.com/golang/go/wiki/CodeReviewComments. 
+ +This is just a rough outline for now, we will formalize this as we go. + +IF YOU CODE ON OPENSTORAGE, YOU ARE EXPECTED TO KNOW THIS. Just take the 20 minutes and read through the issues, we will buy you a coffee, maybe. + +### Relevant Issues + +* https://github.com/libopenstorage/openstorage/issues/100 +* https://github.com/libopenstorage/openstorage/issues/88 +* https://github.com/libopenstorage/openstorage/issues/97 +* https://github.com/libopenstorage/openstorage/issues/87 +* https://github.com/libopenstorage/openstorage/issues/92 +* https://github.com/libopenstorage/openstorage/issues/89 +* https://github.com/libopenstorage/openstorage/issues/76 +* https://github.com/libopenstorage/openstorage/issues/71 +* https://github.com/libopenstorage/openstorage/issues/96 + +### Items + +* Use [dlog](https://go.pedge.io/dlog) for logging. + +* File order: + +```go +package pkg + +const ( + ... +) + +var ( + ... +) + +// but init should generally be not used +func init() { +} + +// public struct + +// public struct functions + +// private struct functions + +// private functions but only if they just apply to this struct, otherwise in a common file +``` + +* All new code must pass `go vet`, `errcheck`, `golint`. For `errcheck`, this means no more unchecked errors. Use of `_ = someFnThatReturnsError()` will be audited soon, and in general should not be used anymore. `golint` means all public types need comments to pass, which means both (A) we will have better code documentation and (B) we will think more about what should actually be public. `errcheck` and `golint` are great deterrents. + +* All packages have a file named `name_of_package.go`, ie in `api/server`, we have `server.go`. + +* Packages are named after their directory, ie `api/` is `package api`. + +* **ALL PUBLIC TYPES GO IN `name_of_package.go`.** Every other file is just a helper file that implements the types. 
+ +* Variable names should reflect the type, ie an instance of `Runner` should be `runner`. This is in contrast to golang's official recommendation, but has been found to make code more readable. Heh. So this means `api.Volume` is not `v, vol, whatever`, it's `volume`, a `request` is not `req, createReq, r`, it's `request`. Only exception is the receiver argument on a function, ie `func (s *server) Foo(...) { ... }`. + +* Structs without a corresponding interface are data holders. Structs with functions attached have a public interface wrapper, and then the struct becomes private. Example: + +```go +// foo.go +package foo + +type Runner interface { + Run(one string, i int) error +} + +func NewRunner(something bar.Something) Runner { + return newRunner(something) +} + +// runner.go +package foo + +type runner struct{ + something bar.Something +} + +func newRunner(something bar.Something) *runner { + return &runner{something} +} + +func (r *runner) Run(one string, i int) error { + r.hello(i) + return r.something.Bar(one, i+1) +} + +func (r *runner) hello(i int) { + r.something.Hello(i) +} +``` + +* Most structs that have functions attached have a separate file with the private struct definition, private constructor, and public functions, then private functions. The runner struct above is an example. + +* Use struct pointers in general instead of structs. It's a debate, but not for now. + +* Function parameters/struct initialization/function calls etc are either on one line or each parameter/field has a new line for it. Example: + +```go +// yes +func oneLine(a string, b string, c string) { +} + +//yes +func multiLine( + a string, + b string, + c string, +) { +} + +//no +func multiLineNo( + a string, + b string, + c string) { +} + +// no +func multiLineNo2(a string, + b string, c string) { +} +``` + +* **NO CALLING `os.Exit(...)` OR `panic(...)` IN LIBRARY CODE.** Ie nowhere but a main package. 
+ +* New introductions of global variables and init functions have to be vetted extensively by project owners (and existing ones should be deleted as much as we can). + +* No reliance on freeform string matching for errors. + +* No typing dynamic value primitives. Example: + +```go +// no +type VolumeID string + +type Volume struct { + VolumeID VolumeID + ... +} + +//yes +type Volume struct { + VolumeID string +} +``` + +* Static value primitives (also known as enums) are not strings. Most new ones should be generated with protobuf, see [api/api.proto](api/api.proto) for examples. + +* Remove most uses of private variables in public structs. + +* Remove most extra variable definitions that are not needed or turn into constants (https://github.com/libopenstorage/openstorage/blob/8d07329468ef709838e443dc17b1eecf2c7cf77d/api/server/volume.go#L76). + +* Reduce adding of String() methods on most objects (let the generic `%+v` take care of it). + +* Use fewer newlines within methods. + +* Single errors are scoped within an if statement: + +```go +// no +err := foo() +if err != nil { + return nil, err +} + +// yes +if err := foo(); err != nil { + return nil, err +} + +// yes, if ignoring return value +if _, err := bar(); err != nil { + return nil, err +} +``` + +* Empty structs: + +```go +// no +type EmptyStruct struct { +} +// yes +type EmptyStruct struct{} +``` + +* Blank imports should have explanation (https://github.com/libopenstorage/openstorage/blob/8d07329468ef709838e443dc17b1eecf2c7cf77d/volume/enumerator.go#L6). + +* No code checked in that has warnings https://golang.org/cmd/cgo/. + +* Do not check in if `make docker-test` does not pass. diff --git a/api/api.go b/api/api.go index eb7746be5..e4a6b4753 100644 --- a/api/api.go +++ b/api/api.go @@ -1,14 +1,138 @@ -/* -Package api defines the payload structures used between the client and the apiserver for the volumes and cluster packages. 
-*/ package api -type DriverType int +import ( + "fmt" + "strconv" + "strings" + "time" + "github.com/fsouza/go-dockerclient" + "github.com/portworx/systemutils" +) + +// Strings for VolumeSpec +const ( + SpecEphemeral = "ephemeral" + SpecSize = "size" + SpecFilesystem = "format" + SpecBlockSize = "blocksize" + SpecHaLevel = "ha_level" + SpecCos = "cos" + SpecSnapshotInterval = "snapshot_interval" + SpecDedupe = "dedupe" +) + +// OptionKey specifies a set of recognized query params const ( - File = 1 << iota - Block - Object - Clustered - Graph + // OptName query parameter used to lookup volume by name + OptName = "Name" + // OptVolumeID query parameter used to lookup volume by ID. + OptVolumeID = "VolumeID" + // OptLabel query parameter used to lookup volume by set of labels. + OptLabel = "Label" + // OptConfigLabel query parameter used to lookup volume by set of labels. + OptConfigLabel = "ConfigLabel" ) + +// Node describes the state of a node. +// It includes the current physical state (CPU, memory, storage, network usage) as +// well as the containers running on the system. +type Node struct { + Id string + Cpu float64 // percentage. + Memory float64 // percentage. + Luns map[string]systemutils.Lun + Avgload int + Ip string + Timestamp time.Time + Status Status + Containers []docker.APIContainers + NodeData map[string]interface{} + GenNumber uint64 +} + +// Cluster represents the state of the cluster. 
+type Cluster struct { + Status Status + Id string + Nodes []Node +} + +func StatusSimpleValueOf(s string) (Status, error) { + obj, err := simpleValueOf("status", Status_value, s) + return Status(obj), err +} + +func (x Status) SimpleString() string { + return simpleString("status", Status_name, int32(x)) +} + +func DriverTypeSimpleValueOf(s string) (DriverType, error) { + obj, err := simpleValueOf("driver_type", DriverType_value, s) + return DriverType(obj), err +} + +func (x DriverType) SimpleString() string { + return simpleString("driver_type", DriverType_name, int32(x)) +} + +func FSTypeSimpleValueOf(s string) (FSType, error) { + obj, err := simpleValueOf("fs_type", FSType_value, s) + return FSType(obj), err +} + +func (x FSType) SimpleString() string { + return simpleString("fs_type", FSType_name, int32(x)) +} + +func GraphDriverChangeTypeSimpleValueOf(s string) (GraphDriverChangeType, error) { + obj, err := simpleValueOf("graph_driver_change_type", GraphDriverChangeType_value, s) + return GraphDriverChangeType(obj), err +} + +func (x GraphDriverChangeType) SimpleString() string { + return simpleString("graph_driver_change_type", GraphDriverChangeType_name, int32(x)) +} + +func VolumeActionParamSimpleValueOf(s string) (VolumeActionParam, error) { + obj, err := simpleValueOf("volume_action_param", VolumeActionParam_value, s) + return VolumeActionParam(obj), err +} + +func (x VolumeActionParam) SimpleString() string { + return simpleString("volume_action_param", VolumeActionParam_name, int32(x)) +} + +func VolumeStateSimpleValueOf(s string) (VolumeState, error) { + obj, err := simpleValueOf("volume_state", VolumeState_value, s) + return VolumeState(obj), err +} + +func (x VolumeState) SimpleString() string { + return simpleString("volume_state", VolumeState_name, int32(x)) +} + +func VolumeStatusSimpleValueOf(s string) (VolumeStatus, error) { + obj, err := simpleValueOf("volume_status", VolumeStatus_value, s) + return VolumeStatus(obj), err +} + +func (x 
VolumeStatus) SimpleString() string { + return simpleString("volume_status", VolumeStatus_name, int32(x)) +} + +func simpleValueOf(typeString string, valueMap map[string]int32, s string) (int32, error) { + obj, ok := valueMap[strings.ToUpper(fmt.Sprintf("%s_%s", typeString, s))] + if !ok { + return 0, fmt.Errorf("no openstorage.%s for %s", strings.ToUpper(typeString), s) + } + return obj, nil +} + +func simpleString(typeString string, nameMap map[int32]string, v int32) string { + s, ok := nameMap[v] + if !ok { + return strconv.Itoa(int(v)) + } + return strings.TrimPrefix(strings.ToLower(s), fmt.Sprintf("%s_", strings.ToLower(typeString))) +} diff --git a/api/api.pb.go b/api/api.pb.go new file mode 100644 index 000000000..a8f9a5801 --- /dev/null +++ b/api/api.pb.go @@ -0,0 +1,761 @@ +// Code generated by protoc-gen-go. +// source: api/api.proto +// DO NOT EDIT! + +package api + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "go.pedge.io/pb/go/google/protobuf" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 + +// CHANGE: no NONE, was 1 << iota +type Status int32 + +const ( + Status_STATUS_NONE Status = 0 + Status_STATUS_INIT Status = 1 + Status_STATUS_OK Status = 2 + Status_STATUS_OFFLINE Status = 3 + Status_STATUS_ERROR Status = 4 +) + +var Status_name = map[int32]string{ + 0: "STATUS_NONE", + 1: "STATUS_INIT", + 2: "STATUS_OK", + 3: "STATUS_OFFLINE", + 4: "STATUS_ERROR", +} +var Status_value = map[string]int32{ + "STATUS_NONE": 0, + "STATUS_INIT": 1, + "STATUS_OK": 2, + "STATUS_OFFLINE": 3, + "STATUS_ERROR": 4, +} + +func (x Status) String() string { + return proto.EnumName(Status_name, int32(x)) +} +func (Status) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +// CHANGE: no NONE, was 1 << iota +type DriverType int32 + +const ( + DriverType_DRIVER_TYPE_NONE DriverType = 0 + DriverType_DRIVER_TYPE_FILE DriverType = 1 + DriverType_DRIVER_TYPE_BLOCK DriverType = 2 + DriverType_DRIVER_TYPE_OBJECT DriverType = 3 + DriverType_DRIVER_TYPE_CLUSTERED DriverType = 4 + DriverType_DRIVER_TYPE_GRAPH DriverType = 5 +) + +var DriverType_name = map[int32]string{ + 0: "DRIVER_TYPE_NONE", + 1: "DRIVER_TYPE_FILE", + 2: "DRIVER_TYPE_BLOCK", + 3: "DRIVER_TYPE_OBJECT", + 4: "DRIVER_TYPE_CLUSTERED", + 5: "DRIVER_TYPE_GRAPH", +} +var DriverType_value = map[string]int32{ + "DRIVER_TYPE_NONE": 0, + "DRIVER_TYPE_FILE": 1, + "DRIVER_TYPE_BLOCK": 2, + "DRIVER_TYPE_OBJECT": 3, + "DRIVER_TYPE_CLUSTERED": 4, + "DRIVER_TYPE_GRAPH": 5, +} + +func (x DriverType) String() string { + return proto.EnumName(DriverType_name, int32(x)) +} +func (DriverType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// CHANGE: was Filesystem, no NONE, was a string +type FSType int32 + +const ( + FSType_FS_TYPE_NONE FSType = 0 + FSType_FS_TYPE_BTRFS FSType = 1 + FSType_FS_TYPE_EXT4 FSType = 2 + FSType_FS_TYPE_FUSE FSType = 3 + FSType_FS_TYPE_NFS FSType = 4 + FSType_FS_TYPE_VFS FSType = 5 + FSType_FS_TYPE_XFS FSType = 6 + 
FSType_FS_TYPE_ZFS FSType = 7 +) + +var FSType_name = map[int32]string{ + 0: "FS_TYPE_NONE", + 1: "FS_TYPE_BTRFS", + 2: "FS_TYPE_EXT4", + 3: "FS_TYPE_FUSE", + 4: "FS_TYPE_NFS", + 5: "FS_TYPE_VFS", + 6: "FS_TYPE_XFS", + 7: "FS_TYPE_ZFS", +} +var FSType_value = map[string]int32{ + "FS_TYPE_NONE": 0, + "FS_TYPE_BTRFS": 1, + "FS_TYPE_EXT4": 2, + "FS_TYPE_FUSE": 3, + "FS_TYPE_NFS": 4, + "FS_TYPE_VFS": 5, + "FS_TYPE_XFS": 6, + "FS_TYPE_ZFS": 7, +} + +func (x FSType) String() string { + return proto.EnumName(FSType_name, int32(x)) +} +func (FSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +// CHANGE: was an int, 0 was modified, 1 was added, 2 was deleted +type GraphDriverChangeType int32 + +const ( + GraphDriverChangeType_GRAPH_DRIVER_CHANGE_TYPE_NONE GraphDriverChangeType = 0 + GraphDriverChangeType_GRAPH_DRIVER_CHANGE_TYPE_MODIFIED GraphDriverChangeType = 1 + GraphDriverChangeType_GRAPH_DRIVER_CHANGE_TYPE_ADDED GraphDriverChangeType = 2 + GraphDriverChangeType_GRAPH_DRIVER_CHANGE_TYPE_DELETED GraphDriverChangeType = 3 +) + +var GraphDriverChangeType_name = map[int32]string{ + 0: "GRAPH_DRIVER_CHANGE_TYPE_NONE", + 1: "GRAPH_DRIVER_CHANGE_TYPE_MODIFIED", + 2: "GRAPH_DRIVER_CHANGE_TYPE_ADDED", + 3: "GRAPH_DRIVER_CHANGE_TYPE_DELETED", +} +var GraphDriverChangeType_value = map[string]int32{ + "GRAPH_DRIVER_CHANGE_TYPE_NONE": 0, + "GRAPH_DRIVER_CHANGE_TYPE_MODIFIED": 1, + "GRAPH_DRIVER_CHANGE_TYPE_ADDED": 2, + "GRAPH_DRIVER_CHANGE_TYPE_DELETED": 3, +} + +func (x GraphDriverChangeType) String() string { + return proto.EnumName(GraphDriverChangeType_name, int32(x)) +} +func (GraphDriverChangeType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +// CHANGE: no NONE +type VolumeActionParam int32 + +const ( + VolumeActionParam_VOLUME_ACTION_PARAM_NONE VolumeActionParam = 0 + // Maps to the boolean value false + VolumeActionParam_VOLUME_ACTION_PARAM_OFF VolumeActionParam = 1 + // Maps to the boolean value true. 
+ VolumeActionParam_VOLUME_ACTION_PARAM_ON VolumeActionParam = 2 +) + +var VolumeActionParam_name = map[int32]string{ + 0: "VOLUME_ACTION_PARAM_NONE", + 1: "VOLUME_ACTION_PARAM_OFF", + 2: "VOLUME_ACTION_PARAM_ON", +} +var VolumeActionParam_value = map[string]int32{ + "VOLUME_ACTION_PARAM_NONE": 0, + "VOLUME_ACTION_PARAM_OFF": 1, + "VOLUME_ACTION_PARAM_ON": 2, +} + +func (x VolumeActionParam) String() string { + return proto.EnumName(VolumeActionParam_name, int32(x)) +} +func (VolumeActionParam) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +// VolumeState represents the state of a volume. +// CHANGE: no NONE, was 1 << iota (and was bit or'ed/and'ed) +type VolumeState int32 + +const ( + VolumeState_VOLUME_STATE_NONE VolumeState = 0 + // Volume is transitioning to new state + VolumeState_VOLUME_STATE_PENDING VolumeState = 1 + // Volume is ready to be assigned to a container + VolumeState_VOLUME_STATE_AVAILABLE VolumeState = 2 + // Volume is attached to container + VolumeState_VOLUME_STATE_ATTACHED VolumeState = 3 + // Volume is detached but associated with a container + VolumeState_VOLUME_STATE_DETACHED VolumeState = 4 + // Volume detach is in progress + VolumeState_VOLUME_STATE_DETATCHING VolumeState = 5 + // Volume is in error state + VolumeState_VOLUME_STATE_ERROR VolumeState = 6 + // Volume is deleted, it will remain in this state + // while resources are asynchronously reclaimed + VolumeState_VOLUME_STATE_DELETED VolumeState = 7 +) + +var VolumeState_name = map[int32]string{ + 0: "VOLUME_STATE_NONE", + 1: "VOLUME_STATE_PENDING", + 2: "VOLUME_STATE_AVAILABLE", + 3: "VOLUME_STATE_ATTACHED", + 4: "VOLUME_STATE_DETACHED", + 5: "VOLUME_STATE_DETATCHING", + 6: "VOLUME_STATE_ERROR", + 7: "VOLUME_STATE_DELETED", +} +var VolumeState_value = map[string]int32{ + "VOLUME_STATE_NONE": 0, + "VOLUME_STATE_PENDING": 1, + "VOLUME_STATE_AVAILABLE": 2, + "VOLUME_STATE_ATTACHED": 3, + "VOLUME_STATE_DETACHED": 4, + "VOLUME_STATE_DETATCHING": 5, + 
"VOLUME_STATE_ERROR": 6, + "VOLUME_STATE_DELETED": 7, +} + +func (x VolumeState) String() string { + return proto.EnumName(VolumeState_name, int32(x)) +} +func (VolumeState) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +// VolumeStatus represents a health status for a volume. +// CHANGE: no NONE, was a string +type VolumeStatus int32 + +const ( + VolumeStatus_VOLUME_STATUS_NONE VolumeStatus = 0 + // Volume is not present + VolumeStatus_VOLUME_STATUS_NOT_PRESENT VolumeStatus = 1 + // Volume is healthy + VolumeStatus_VOLUME_STATUS_UP VolumeStatus = 2 + // Volume is in fail mode + VolumeStatus_VOLUME_STATUS_DOWN VolumeStatus = 3 + // Volume is up but with degraded performance + // In a RAID group, this may indicate a problem with one or more drives + VolumeStatus_VOLUME_STATUS_DEGRADED VolumeStatus = 4 +) + +var VolumeStatus_name = map[int32]string{ + 0: "VOLUME_STATUS_NONE", + 1: "VOLUME_STATUS_NOT_PRESENT", + 2: "VOLUME_STATUS_UP", + 3: "VOLUME_STATUS_DOWN", + 4: "VOLUME_STATUS_DEGRADED", +} +var VolumeStatus_value = map[string]int32{ + "VOLUME_STATUS_NONE": 0, + "VOLUME_STATUS_NOT_PRESENT": 1, + "VOLUME_STATUS_UP": 2, + "VOLUME_STATUS_DOWN": 3, + "VOLUME_STATUS_DEGRADED": 4, +} + +func (x VolumeStatus) String() string { + return proto.EnumName(VolumeStatus_name, int32(x)) +} +func (VolumeStatus) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +// VolumeLocator is a structure that is attached to a volume +// and is used to carry opaque metadata. 
+type VolumeLocator struct { + // User friendly identifier + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A set of name-value pairs that acts as search filters + VolumeLabels map[string]string `protobuf:"bytes,2,rep,name=volume_labels" json:"volume_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *VolumeLocator) Reset() { *m = VolumeLocator{} } +func (m *VolumeLocator) String() string { return proto.CompactTextString(m) } +func (*VolumeLocator) ProtoMessage() {} +func (*VolumeLocator) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *VolumeLocator) GetVolumeLabels() map[string]string { + if m != nil { + return m.VolumeLabels + } + return nil +} + +type Source struct { + // A volume id, if specified will create a clone of the parent. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Seed will seed the volume from the specified URI + // Any additional config for the source comes from the labels in the spec + Seed string `protobuf:"bytes,2,opt,name=seed" json:"seed,omitempty"` +} + +func (m *Source) Reset() { *m = Source{} } +func (m *Source) String() string { return proto.CompactTextString(m) } +func (*Source) ProtoMessage() {} +func (*Source) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +// VolumeSpec has the properties needed to create a volume. 
+type VolumeSpec struct { + // Ephemeral storage + Ephemeral bool `protobuf:"varint,1,opt,name=ephemeral" json:"ephemeral,omitempty"` + // Thin provisioned volume size in bytes + Size uint64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"` + // Format disk with this FSType + Format FSType `protobuf:"varint,3,opt,name=format,enum=openstorage.api.FSType" json:"format,omitempty"` + // Block size for filesystem + BlockSize int64 `protobuf:"varint,4,opt,name=block_size" json:"block_size,omitempty"` + // Specifies the number of nodes that are + // allowed to fail, and yet data is available + // A value of 0 implies that data is not erasure coded, + // a failure of a node will lead to data loss + HaLevel int64 `protobuf:"varint,5,opt,name=ha_level" json:"ha_level,omitempty"` + // The COS, 1 to 9 + Cos uint32 `protobuf:"varint,6,opt,name=cos" json:"cos,omitempty"` + // Perform dedupe on this disk + Dedupe bool `protobuf:"varint,7,opt,name=dedupe" json:"dedupe,omitempty"` + // SnapshotInterval in minutes, set to 0 to disable snapshots + SnapshotInterval uint32 `protobuf:"varint,8,opt,name=snapshot_interval" json:"snapshot_interval,omitempty"` + // Volume configuration labels + ConfigLabels map[string]string `protobuf:"bytes,9,rep,name=config_labels" json:"config_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *VolumeSpec) Reset() { *m = VolumeSpec{} } +func (m *VolumeSpec) String() string { return proto.CompactTextString(m) } +func (*VolumeSpec) ProtoMessage() {} +func (*VolumeSpec) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *VolumeSpec) GetConfigLabels() map[string]string { + if m != nil { + return m.ConfigLabels + } + return nil +} + +// Volume represents a live, created volume. 
+type Volume struct { + // Self referential volume ID + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Source *Source `protobuf:"bytes,2,opt,name=source" json:"source,omitempty"` + Readonly bool `protobuf:"varint,3,opt,name=readonly" json:"readonly,omitempty"` + // User specified locator + Locator *VolumeLocator `protobuf:"bytes,4,opt,name=locator" json:"locator,omitempty"` + // Volume creation time + Ctime *google_protobuf.Timestamp `protobuf:"bytes,5,opt,name=ctime" json:"ctime,omitempty"` + // User specified VolumeSpec + Spec *VolumeSpec `protobuf:"bytes,6,opt,name=spec" json:"spec,omitempty"` + // Volume usage + Usage uint64 `protobuf:"varint,7,opt,name=usage" json:"usage,omitempty"` + // Time when an integrity check for run + LastScan *google_protobuf.Timestamp `protobuf:"bytes,8,opt,name=last_scan" json:"last_scan,omitempty"` + // Format FSType type if any + Format FSType `protobuf:"varint,9,opt,name=format,enum=openstorage.api.FSType" json:"format,omitempty"` + Status VolumeStatus `protobuf:"varint,10,opt,name=status,enum=openstorage.api.VolumeStatus" json:"status,omitempty"` + State VolumeState `protobuf:"varint,11,opt,name=state,enum=openstorage.api.VolumeState" json:"state,omitempty"` + // Machine ID (node) on which this volume is attached + // Machine ID is a node instance identifier for clustered systems. 
+ AttachedOn string `protobuf:"bytes,12,opt,name=attached_on" json:"attached_on,omitempty"` + DevicePath string `protobuf:"bytes,14,opt,name=device_path" json:"device_path,omitempty"` + AttachPath string `protobuf:"bytes,15,opt,name=attach_path" json:"attach_path,omitempty"` + // Set of machine IDs (nodes) to which this volume is erasure coded - for clustered storage arrays + ReplicaSet []string `protobuf:"bytes,16,rep,name=replica_set" json:"replica_set,omitempty"` + // Last recorded error + Error string `protobuf:"bytes,17,opt,name=error" json:"error,omitempty"` +} + +func (m *Volume) Reset() { *m = Volume{} } +func (m *Volume) String() string { return proto.CompactTextString(m) } +func (*Volume) ProtoMessage() {} +func (*Volume) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *Volume) GetSource() *Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *Volume) GetLocator() *VolumeLocator { + if m != nil { + return m.Locator + } + return nil +} + +func (m *Volume) GetCtime() *google_protobuf.Timestamp { + if m != nil { + return m.Ctime + } + return nil +} + +func (m *Volume) GetSpec() *VolumeSpec { + if m != nil { + return m.Spec + } + return nil +} + +func (m *Volume) GetLastScan() *google_protobuf.Timestamp { + if m != nil { + return m.LastScan + } + return nil +} + +type Stats struct { + // Reads completed successfully + Reads int64 `protobuf:"varint,1,opt,name=reads" json:"reads,omitempty"` + // Time spent in reads in ms + ReadMs int64 `protobuf:"varint,2,opt,name=read_ms" json:"read_ms,omitempty"` + ReadBytes int64 `protobuf:"varint,3,opt,name=read_bytes" json:"read_bytes,omitempty"` + // Writes completed successfully + Writes int64 `protobuf:"varint,4,opt,name=writes" json:"writes,omitempty"` + // Time spent in writes in ms + WriteMs int64 `protobuf:"varint,5,opt,name=write_ms" json:"write_ms,omitempty"` + WriteBytes int64 `protobuf:"varint,6,opt,name=write_bytes" json:"write_bytes,omitempty"` + // IOs 
curently in progress + IoProgress int64 `protobuf:"varint,7,opt,name=io_progress" json:"io_progress,omitempty"` + // Time spent doing IOs ms + IoMs int64 `protobuf:"varint,8,opt,name=io_ms" json:"io_ms,omitempty"` +} + +func (m *Stats) Reset() { *m = Stats{} } +func (m *Stats) String() string { return proto.CompactTextString(m) } +func (*Stats) ProtoMessage() {} +func (*Stats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +// TODO: what? +type Alerts struct { +} + +func (m *Alerts) Reset() { *m = Alerts{} } +func (m *Alerts) String() string { return proto.CompactTextString(m) } +func (*Alerts) ProtoMessage() {} +func (*Alerts) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +type VolumeCreateRequest struct { + // User specified volume name and labels + Locator *VolumeLocator `protobuf:"bytes,1,opt,name=locator" json:"locator,omitempty"` + // Source to create volume + Source *Source `protobuf:"bytes,2,opt,name=source" json:"source,omitempty"` + // The storage spec for the volume + Spec *VolumeSpec `protobuf:"bytes,3,opt,name=spec" json:"spec,omitempty"` +} + +func (m *VolumeCreateRequest) Reset() { *m = VolumeCreateRequest{} } +func (m *VolumeCreateRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeCreateRequest) ProtoMessage() {} +func (*VolumeCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *VolumeCreateRequest) GetLocator() *VolumeLocator { + if m != nil { + return m.Locator + } + return nil +} + +func (m *VolumeCreateRequest) GetSource() *Source { + if m != nil { + return m.Source + } + return nil +} + +func (m *VolumeCreateRequest) GetSpec() *VolumeSpec { + if m != nil { + return m.Spec + } + return nil +} + +type VolumeResponse struct { + Error string `protobuf:"bytes,1,opt,name=error" json:"error,omitempty"` +} + +func (m *VolumeResponse) Reset() { *m = VolumeResponse{} } +func (m *VolumeResponse) String() string { return proto.CompactTextString(m) } 
+func (*VolumeResponse) ProtoMessage() {} +func (*VolumeResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +// CHANGE: error was embedded VolumeResponse +type VolumeCreateResponse struct { + // ID of the newly created volume + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + VolumeResponse *VolumeResponse `protobuf:"bytes,2,opt,name=volume_response" json:"volume_response,omitempty"` +} + +func (m *VolumeCreateResponse) Reset() { *m = VolumeCreateResponse{} } +func (m *VolumeCreateResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeCreateResponse) ProtoMessage() {} +func (*VolumeCreateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *VolumeCreateResponse) GetVolumeResponse() *VolumeResponse { + if m != nil { + return m.VolumeResponse + } + return nil +} + +// VolumeStateAction specifies desired actions. +type VolumeStateAction struct { + // Attach or Detach volume + Attach VolumeActionParam `protobuf:"varint,1,opt,name=attach,enum=openstorage.api.VolumeActionParam" json:"attach,omitempty"` + // Mount or unmount volume + Mount VolumeActionParam `protobuf:"varint,2,opt,name=mount,enum=openstorage.api.VolumeActionParam" json:"mount,omitempty"` + MountPath string `protobuf:"bytes,3,opt,name=mount_path" json:"mount_path,omitempty"` + // Device path returned in attach + DevicePath string `protobuf:"bytes,4,opt,name=device_path" json:"device_path,omitempty"` +} + +func (m *VolumeStateAction) Reset() { *m = VolumeStateAction{} } +func (m *VolumeStateAction) String() string { return proto.CompactTextString(m) } +func (*VolumeStateAction) ProtoMessage() {} +func (*VolumeStateAction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +type VolumeSetRequest struct { + // User specified volume name and labels + Locator *VolumeLocator `protobuf:"bytes,1,opt,name=locator" json:"locator,omitempty"` + // The storage spec for the volume + Spec *VolumeSpec 
`protobuf:"bytes,2,opt,name=spec" json:"spec,omitempty"` + // State modification on this volume. + Action *VolumeStateAction `protobuf:"bytes,3,opt,name=action" json:"action,omitempty"` +} + +func (m *VolumeSetRequest) Reset() { *m = VolumeSetRequest{} } +func (m *VolumeSetRequest) String() string { return proto.CompactTextString(m) } +func (*VolumeSetRequest) ProtoMessage() {} +func (*VolumeSetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *VolumeSetRequest) GetLocator() *VolumeLocator { + if m != nil { + return m.Locator + } + return nil +} + +func (m *VolumeSetRequest) GetSpec() *VolumeSpec { + if m != nil { + return m.Spec + } + return nil +} + +func (m *VolumeSetRequest) GetAction() *VolumeStateAction { + if m != nil { + return m.Action + } + return nil +} + +// CHANGE: error was embedded VolumeResponse +type VolumeSetResponse struct { + Volume *Volume `protobuf:"bytes,1,opt,name=volume" json:"volume,omitempty"` + VolumeResponse *VolumeResponse `protobuf:"bytes,2,opt,name=volume_response" json:"volume_response,omitempty"` +} + +func (m *VolumeSetResponse) Reset() { *m = VolumeSetResponse{} } +func (m *VolumeSetResponse) String() string { return proto.CompactTextString(m) } +func (*VolumeSetResponse) ProtoMessage() {} +func (*VolumeSetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *VolumeSetResponse) GetVolume() *Volume { + if m != nil { + return m.Volume + } + return nil +} + +func (m *VolumeSetResponse) GetVolumeResponse() *VolumeResponse { + if m != nil { + return m.VolumeResponse + } + return nil +} + +type SnapCreateRequest struct { + // volume id + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + Locator *VolumeLocator `protobuf:"bytes,2,opt,name=locator" json:"locator,omitempty"` + Readonly bool `protobuf:"varint,3,opt,name=readonly" json:"readonly,omitempty"` +} + +func (m *SnapCreateRequest) Reset() { *m = SnapCreateRequest{} } +func (m *SnapCreateRequest) 
String() string { return proto.CompactTextString(m) } +func (*SnapCreateRequest) ProtoMessage() {} +func (*SnapCreateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *SnapCreateRequest) GetLocator() *VolumeLocator { + if m != nil { + return m.Locator + } + return nil +} + +type SnapCreateResponse struct { + VolumeCreateResponse *VolumeCreateResponse `protobuf:"bytes,1,opt,name=volume_create_response" json:"volume_create_response,omitempty"` +} + +func (m *SnapCreateResponse) Reset() { *m = SnapCreateResponse{} } +func (m *SnapCreateResponse) String() string { return proto.CompactTextString(m) } +func (*SnapCreateResponse) ProtoMessage() {} +func (*SnapCreateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *SnapCreateResponse) GetVolumeCreateResponse() *VolumeCreateResponse { + if m != nil { + return m.VolumeCreateResponse + } + return nil +} + +type VolumeInfo struct { + VolumeId string `protobuf:"bytes,1,opt,name=volume_id" json:"volume_id,omitempty"` + Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` + Storage *VolumeSpec `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"` +} + +func (m *VolumeInfo) Reset() { *m = VolumeInfo{} } +func (m *VolumeInfo) String() string { return proto.CompactTextString(m) } +func (*VolumeInfo) ProtoMessage() {} +func (*VolumeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *VolumeInfo) GetStorage() *VolumeSpec { + if m != nil { + return m.Storage + } + return nil +} + +// GraphDriverChanges represent a list of changes between the filesystem layers +// specified by the ID and Parent. // Parent may be an empty string, in which +// case there is no parent. 
+// Where the Path is the filesystem path within the layered filesystem +// CHANGE: kind was an int +type GraphDriverChanges struct { + Path string `protobuf:"bytes,1,opt,name=path" json:"path,omitempty"` + Kind GraphDriverChangeType `protobuf:"varint,2,opt,name=kind,enum=openstorage.api.GraphDriverChangeType" json:"kind,omitempty"` +} + +func (m *GraphDriverChanges) Reset() { *m = GraphDriverChanges{} } +func (m *GraphDriverChanges) String() string { return proto.CompactTextString(m) } +func (*GraphDriverChanges) ProtoMessage() {} +func (*GraphDriverChanges) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func init() { + proto.RegisterType((*VolumeLocator)(nil), "openstorage.api.VolumeLocator") + proto.RegisterType((*Source)(nil), "openstorage.api.Source") + proto.RegisterType((*VolumeSpec)(nil), "openstorage.api.VolumeSpec") + proto.RegisterType((*Volume)(nil), "openstorage.api.Volume") + proto.RegisterType((*Stats)(nil), "openstorage.api.Stats") + proto.RegisterType((*Alerts)(nil), "openstorage.api.Alerts") + proto.RegisterType((*VolumeCreateRequest)(nil), "openstorage.api.VolumeCreateRequest") + proto.RegisterType((*VolumeResponse)(nil), "openstorage.api.VolumeResponse") + proto.RegisterType((*VolumeCreateResponse)(nil), "openstorage.api.VolumeCreateResponse") + proto.RegisterType((*VolumeStateAction)(nil), "openstorage.api.VolumeStateAction") + proto.RegisterType((*VolumeSetRequest)(nil), "openstorage.api.VolumeSetRequest") + proto.RegisterType((*VolumeSetResponse)(nil), "openstorage.api.VolumeSetResponse") + proto.RegisterType((*SnapCreateRequest)(nil), "openstorage.api.SnapCreateRequest") + proto.RegisterType((*SnapCreateResponse)(nil), "openstorage.api.SnapCreateResponse") + proto.RegisterType((*VolumeInfo)(nil), "openstorage.api.VolumeInfo") + proto.RegisterType((*GraphDriverChanges)(nil), "openstorage.api.GraphDriverChanges") + proto.RegisterEnum("openstorage.api.Status", Status_name, Status_value) + 
proto.RegisterEnum("openstorage.api.DriverType", DriverType_name, DriverType_value) + proto.RegisterEnum("openstorage.api.FSType", FSType_name, FSType_value) + proto.RegisterEnum("openstorage.api.GraphDriverChangeType", GraphDriverChangeType_name, GraphDriverChangeType_value) + proto.RegisterEnum("openstorage.api.VolumeActionParam", VolumeActionParam_name, VolumeActionParam_value) + proto.RegisterEnum("openstorage.api.VolumeState", VolumeState_name, VolumeState_value) + proto.RegisterEnum("openstorage.api.VolumeStatus", VolumeStatus_name, VolumeStatus_value) +} + +var fileDescriptor0 = []byte{ + // 1370 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xa4, 0x56, 0xcd, 0x72, 0xdb, 0x54, + 0x14, 0x46, 0x96, 0xed, 0x24, 0xc7, 0x71, 0x62, 0xdf, 0xfe, 0xa9, 0xe9, 0xbf, 0x86, 0x96, 0x62, + 0xa8, 0x03, 0x6e, 0x17, 0x1d, 0x76, 0x8e, 0x2d, 0x27, 0x06, 0xd7, 0xf6, 0xd8, 0x4a, 0x28, 0x85, + 0x19, 0x8d, 0x22, 0xdf, 0xc4, 0xa6, 0xb2, 0x24, 0x24, 0x39, 0x4c, 0x78, 0x09, 0x56, 0x30, 0x03, + 0xec, 0x60, 0xc9, 0x82, 0x47, 0xe2, 0x45, 0x58, 0x70, 0xee, 0xbd, 0x52, 0x22, 0xc5, 0xb1, 0x9b, + 0x81, 0x45, 0x26, 0x73, 0xbf, 0x73, 0xee, 0xf9, 0xf9, 0xce, 0x77, 0xae, 0x0c, 0x45, 0xd3, 0x9b, + 0x6c, 0xe3, 0x5f, 0xd5, 0xf3, 0xdd, 0xd0, 0x25, 0x9b, 0xae, 0x47, 0x9d, 0x20, 0x74, 0x7d, 0xf3, + 0x98, 0x56, 0x11, 0xde, 0x7a, 0x70, 0xec, 0xba, 0xc7, 0x36, 0xdd, 0xe6, 0xe6, 0xc3, 0xd9, 0xd1, + 0x76, 0x38, 0x99, 0xd2, 0x20, 0x34, 0xa7, 0x9e, 0xb8, 0xa1, 0xfe, 0x26, 0x41, 0xf1, 0xc0, 0xb5, + 0x67, 0x53, 0xda, 0x71, 0x2d, 0x13, 0x6f, 0x92, 0x75, 0xc8, 0x3a, 0xe6, 0x94, 0x2a, 0xd2, 0x43, + 0xe9, 0xe9, 0x1a, 0xd9, 0x85, 0xe2, 0x09, 0x37, 0x1b, 0xb6, 0x79, 0x48, 0xed, 0x40, 0xc9, 0x3c, + 0x94, 0x9f, 0x16, 0x6a, 0x9f, 0x54, 0x2f, 0x64, 0xaa, 0xa6, 0x82, 0xc4, 0x27, 0x7e, 0x45, 0x73, + 0x42, 0xff, 0x74, 0xeb, 0x39, 0x94, 0xe7, 0x40, 0x52, 0x00, 0xf9, 0x2d, 0x3d, 0x8d, 0x52, 0x15, + 0x21, 0x77, 0x62, 0xda, 0x33, 0x8a, 0x29, 0xf0, 0xf8, 0x59, 0xe6, 0xa5, 
0xa4, 0x3e, 0x81, 0xfc, + 0xd0, 0x9d, 0xf9, 0x16, 0x25, 0x1b, 0x90, 0xf7, 0x4c, 0x9f, 0x3a, 0x61, 0xe4, 0x8c, 0x55, 0x06, + 0x94, 0x8e, 0x84, 0xaf, 0xfa, 0x57, 0x06, 0x40, 0x44, 0x1f, 0x7a, 0xd4, 0x22, 0x65, 0x58, 0xa3, + 0xde, 0x98, 0x4e, 0xa9, 0x6f, 0xda, 0xdc, 0x7f, 0x95, 0xfb, 0x4f, 0x7e, 0x10, 0xb1, 0xb3, 0xe4, + 0x03, 0xc8, 0x1f, 0xb9, 0xfe, 0xd4, 0x0c, 0x15, 0x19, 0xcf, 0x1b, 0xb5, 0x5b, 0x73, 0xed, 0xb4, + 0x86, 0xfa, 0xa9, 0x47, 0x09, 0x01, 0x38, 0xb4, 0x5d, 0xeb, 0xad, 0xc1, 0x2f, 0x67, 0xd1, 0x59, + 0x26, 0x25, 0x58, 0x1d, 0x9b, 0x86, 0x4d, 0x4f, 0xa8, 0xad, 0xe4, 0x38, 0x82, 0x6d, 0x58, 0x6e, + 0xa0, 0xe4, 0xf1, 0x50, 0x64, 0x95, 0x8e, 0xe8, 0x68, 0xe6, 0x51, 0x65, 0x85, 0x67, 0xbe, 0x0d, + 0xe5, 0xc0, 0x31, 0xbd, 0x60, 0xec, 0x86, 0xc6, 0xc4, 0x09, 0xa9, 0x8f, 0x4d, 0x2a, 0xab, 0xdc, + 0xb5, 0x09, 0x45, 0xcb, 0x75, 0x8e, 0x26, 0xc7, 0x31, 0xb9, 0x6b, 0x9c, 0xdc, 0x67, 0x0b, 0xc8, + 0x65, 0xbd, 0x55, 0x1b, 0xfc, 0xc2, 0x05, 0x66, 0xe7, 0xc0, 0x77, 0x32, 0xfb, 0x8f, 0x0c, 0x79, + 0x11, 0x15, 0x5b, 0xcc, 0x4c, 0x46, 0x91, 0x27, 0x12, 0x13, 0x70, 0xc2, 0xb9, 0x6b, 0xe1, 0x12, + 0x62, 0xa2, 0x79, 0x20, 0x09, 0x3e, 0x35, 0x47, 0xae, 0x63, 0x9f, 0x72, 0x0e, 0x57, 0xc9, 0x36, + 0xac, 0xd8, 0x62, 0xfa, 0x9c, 0xa7, 0x42, 0xed, 0xfe, 0x72, 0x8d, 0x90, 0x0f, 0x21, 0x67, 0x31, + 0x39, 0x72, 0x12, 0x0b, 0xb5, 0xad, 0xaa, 0xd0, 0x6a, 0x35, 0xd6, 0x6a, 0x55, 0x8f, 0xb5, 0x8a, + 0xae, 0xd9, 0x00, 0x9b, 0xe7, 0x0c, 0x17, 0x6a, 0x77, 0x96, 0xf0, 0xc3, 0x7a, 0x9d, 0x05, 0x88, + 0x73, 0xf6, 0xb3, 0xe4, 0x19, 0xac, 0xd9, 0x66, 0x10, 0x1a, 0x81, 0x65, 0x3a, 0x9c, 0xf5, 0xe5, + 0x89, 0xce, 0x85, 0xb1, 0xb6, 0x5c, 0x18, 0xcf, 0x90, 0xa8, 0xd0, 0x0c, 0x67, 0x81, 0x02, 0xdc, + 0xf1, 0xde, 0xa2, 0x9a, 0xb8, 0x13, 0xf9, 0x08, 0x72, 0xcc, 0x9d, 0x2a, 0x05, 0xee, 0x7d, 0x77, + 0x89, 0x37, 0x25, 0xd7, 0xa0, 0x60, 0x86, 0xa1, 0x69, 0x8d, 0xe9, 0xc8, 0x70, 0x1d, 0x65, 0x9d, + 0x4f, 0x06, 0xc1, 0x11, 0x3d, 0x99, 0x58, 0xd4, 0xf0, 0xcc, 0x70, 0xac, 0x6c, 0xc4, 0xa0, 0xf0, + 0x14, 0xe0, 
0x66, 0x0c, 0xfa, 0xd4, 0xb3, 0x27, 0x96, 0x69, 0x04, 0x34, 0x54, 0x4a, 0xa8, 0x29, + 0x2e, 0x01, 0xea, 0xfb, 0x38, 0x9b, 0x32, 0x5f, 0x98, 0x9f, 0x25, 0xc8, 0xb1, 0x64, 0x01, 0x33, + 0xb0, 0x41, 0x06, 0x5c, 0x00, 0x32, 0xd9, 0x84, 0x15, 0x76, 0x34, 0xa6, 0x01, 0x57, 0x80, 0xcc, + 0x36, 0x80, 0x03, 0x87, 0xa7, 0x21, 0x0d, 0xf8, 0xa8, 0x65, 0x26, 0xf1, 0xef, 0xfd, 0x09, 0x3b, + 0x9f, 0x6d, 0x04, 0x3f, 0xb3, 0x5b, 0x62, 0x23, 0xb0, 0x06, 0x81, 0x88, 0x6b, 0xf9, 0x18, 0x9c, + 0xb8, 0x06, 0xb2, 0x7e, 0xec, 0xd3, 0x20, 0xe0, 0x03, 0x92, 0x59, 0x7e, 0x04, 0xf1, 0x22, 0x1b, + 0x8e, 0xac, 0xae, 0x42, 0xbe, 0x6e, 0x53, 0x3f, 0x0c, 0xd4, 0x5f, 0x25, 0xb8, 0x26, 0x58, 0x69, + 0x60, 0x01, 0x21, 0x1d, 0xd0, 0xef, 0x66, 0x38, 0xa5, 0xa4, 0xce, 0xa4, 0x2b, 0xe9, 0xec, 0xca, + 0x9a, 0x8e, 0x55, 0x26, 0xbf, 0x53, 0x65, 0xea, 0x03, 0xd8, 0x10, 0xa7, 0x01, 0x0d, 0x3c, 0xd7, + 0x09, 0xe8, 0x39, 0xc1, 0x7c, 0x91, 0xd4, 0x6f, 0xe0, 0x7a, 0xba, 0xf8, 0xc8, 0x2d, 0xb9, 0x6c, + 0x2f, 0x61, 0x33, 0x7a, 0x5b, 0xfd, 0xc8, 0x1c, 0x55, 0xf8, 0x60, 0x41, 0xea, 0x38, 0x8a, 0xfa, + 0x87, 0x14, 0xbf, 0xa6, 0x5c, 0x31, 0x75, 0xdc, 0x23, 0xd7, 0x21, 0x35, 0xc8, 0x0b, 0x35, 0xf0, + 0xf8, 0x1b, 0x35, 0x75, 0x41, 0x18, 0xe1, 0xde, 0x37, 0x7d, 0x73, 0x4a, 0x3e, 0x85, 0xdc, 0xd4, + 0x9d, 0xe1, 0xb3, 0x9a, 0xb9, 0xf2, 0x15, 0xec, 0x81, 0x5f, 0x11, 0x9a, 0x93, 0x2f, 0x53, 0x67, + 0x96, 0x73, 0xf0, 0xbb, 0x04, 0xa5, 0xa8, 0x4a, 0x1a, 0xfe, 0xe7, 0xf1, 0xc5, 0x53, 0xc9, 0xbc, + 0x7b, 0xf7, 0x19, 0x01, 0xbc, 0xd0, 0x68, 0x84, 0xea, 0xb2, 0x35, 0x13, 0x2d, 0xa9, 0x27, 0x67, + 0x4c, 0xb2, 0x1a, 0xa3, 0x29, 0xa1, 0x64, 0xc4, 0x64, 0xa2, 0x1a, 0x6f, 0x2d, 0x08, 0xf4, 0x3f, + 0x46, 0x78, 0x08, 0xe5, 0x21, 0x7e, 0x16, 0xd2, 0xda, 0x4e, 0xaa, 0x23, 0x41, 0x54, 0xe6, 0x4a, + 0x44, 0xcd, 0x3d, 0xc9, 0xea, 0xd7, 0x40, 0x92, 0x39, 0xa2, 0xe6, 0x34, 0xb8, 0x19, 0xd5, 0x6c, + 0x71, 0xc3, 0x79, 0xe9, 0xa2, 0xd9, 0xc7, 0x0b, 0xf2, 0xa4, 0xc3, 0x60, 0xf0, 0xe8, 0x93, 0xdb, + 0x76, 0x8e, 0x5c, 0xf6, 0xc9, 0x8d, 0x82, 0x9e, 
0x35, 0x80, 0x9f, 0x5c, 0x2e, 0x06, 0xfe, 0xd1, + 0x21, 0x1f, 0xc3, 0x4a, 0x14, 0xf2, 0x2a, 0xfb, 0xf5, 0x1a, 0xc8, 0xae, 0x6f, 0x7a, 0xe3, 0xa6, + 0x3f, 0x39, 0xa1, 0x7e, 0x63, 0x6c, 0x3a, 0xc7, 0x34, 0x38, 0x8b, 0x28, 0xe2, 0xbf, 0x80, 0xec, + 0xdb, 0x89, 0x33, 0x8a, 0x94, 0xfb, 0x64, 0x2e, 0xdc, 0x5c, 0x00, 0xf6, 0x70, 0x57, 0x0c, 0xfc, + 0x49, 0x21, 0xde, 0xe4, 0x4d, 0x28, 0x0c, 0xf5, 0xba, 0xbe, 0x3f, 0x34, 0xba, 0xbd, 0xae, 0x56, + 0x7a, 0x2f, 0x01, 0xb4, 0xbb, 0x6d, 0xbd, 0x24, 0xe1, 0x4e, 0xaf, 0x45, 0x40, 0xef, 0x8b, 0x52, + 0x06, 0x85, 0xbf, 0x11, 0x1f, 0x5b, 0xad, 0x4e, 0x1b, 0xef, 0xb0, 0xa7, 0x6f, 0x3d, 0xc2, 0xb4, + 0xc1, 0xa0, 0x37, 0x28, 0x65, 0x2b, 0xbf, 0x48, 0x00, 0x22, 0x2b, 0xff, 0x50, 0x5c, 0x87, 0x52, + 0x73, 0xd0, 0x3e, 0xd0, 0x06, 0x86, 0xfe, 0x55, 0x5f, 0x8b, 0x53, 0x5d, 0x40, 0x5b, 0xed, 0x8e, + 0x86, 0xf9, 0x6e, 0x40, 0x39, 0x89, 0xee, 0x74, 0x7a, 0x0d, 0x96, 0xf7, 0x26, 0x90, 0x24, 0xdc, + 0xdb, 0xf9, 0x5c, 0x6b, 0xe8, 0x98, 0xfb, 0x36, 0xdc, 0x48, 0xe2, 0x8d, 0xce, 0xfe, 0x50, 0xd7, + 0x06, 0x5a, 0xb3, 0x94, 0xbd, 0x18, 0x69, 0x77, 0x50, 0xef, 0xef, 0x95, 0x72, 0x95, 0x9f, 0x24, + 0xc8, 0x47, 0x1f, 0x30, 0x2c, 0xbc, 0x35, 0x4c, 0xd5, 0x54, 0x86, 0x62, 0x8c, 0xec, 0xe8, 0x83, + 0xd6, 0x10, 0x0b, 0x4a, 0x38, 0x69, 0xaf, 0xf5, 0x17, 0x58, 0x4b, 0x02, 0x69, 0xed, 0x0f, 0x19, + 0x03, 0xc8, 0xda, 0x59, 0x20, 0xbc, 0x94, 0x4d, 0x02, 0x07, 0x08, 0xe4, 0x92, 0xc0, 0x6b, 0x04, + 0xf2, 0x49, 0xe0, 0x0d, 0x02, 0x2b, 0x95, 0x3f, 0x25, 0xb8, 0x71, 0xe9, 0xb8, 0xc8, 0x23, 0xb8, + 0xc7, 0x8b, 0x37, 0xa2, 0x76, 0x1a, 0x7b, 0xf5, 0xee, 0xae, 0x96, 0xaa, 0xfb, 0x31, 0x3c, 0x5a, + 0xe8, 0xf2, 0xaa, 0xd7, 0x6c, 0xb7, 0xda, 0x48, 0x89, 0x44, 0x54, 0xb8, 0xbf, 0xd0, 0xad, 0xde, + 0x6c, 0xa2, 0x4f, 0x86, 0xbc, 0x0f, 0x0f, 0x17, 0xfa, 0x34, 0xb5, 0x8e, 0xa6, 0xa3, 0x97, 0x5c, + 0xf9, 0x36, 0x7e, 0x32, 0x92, 0xaf, 0xe2, 0x5d, 0x50, 0x0e, 0x7a, 0x9d, 0xfd, 0x57, 0x18, 0xac, + 0xa1, 0xb7, 0x7b, 0x5d, 0xa3, 0x5f, 0x1f, 0xd4, 0x5f, 0xc5, 0x35, 0xde, 0x81, 0x5b, 
0x97, 0x59, + 0x51, 0x47, 0x58, 0xd9, 0x16, 0xdc, 0xbc, 0xd4, 0xd8, 0x2d, 0x65, 0x2a, 0x7f, 0x4b, 0x50, 0x48, + 0xfe, 0x36, 0xc0, 0xc1, 0x46, 0xbe, 0x4c, 0x76, 0x67, 0x1c, 0x28, 0xf8, 0xb9, 0x49, 0xc2, 0x7d, + 0xad, 0xdb, 0x6c, 0x77, 0x77, 0x53, 0xc1, 0x85, 0xa5, 0x7e, 0x50, 0x6f, 0x77, 0xea, 0x3b, 0xa8, + 0xb7, 0x0c, 0x13, 0x50, 0xda, 0xa6, 0xeb, 0xf5, 0xc6, 0x1e, 0xeb, 0x71, 0xce, 0xd4, 0xd4, 0x22, + 0x53, 0x36, 0xd1, 0xcb, 0xb9, 0x49, 0x6f, 0xec, 0xb1, 0x74, 0x39, 0xa6, 0xd5, 0x94, 0x51, 0x6c, + 0x45, 0x7e, 0xae, 0xc0, 0x98, 0xcd, 0x95, 0xca, 0x8f, 0x12, 0xac, 0xa7, 0x7e, 0x2b, 0xa5, 0x43, + 0x9c, 0xaf, 0xe7, 0x3d, 0xb8, 0x7d, 0x11, 0xd7, 0x8d, 0xfe, 0x40, 0x1b, 0x6a, 0x5d, 0xb6, 0xac, + 0xb8, 0x52, 0x69, 0xf3, 0x7e, 0x5f, 0xec, 0x4e, 0x1a, 0x6d, 0xf6, 0xbe, 0xec, 0x62, 0x7f, 0x69, + 0x5a, 0x18, 0xae, 0xe1, 0xe4, 0x99, 0x0a, 0xb2, 0x3b, 0x77, 0xe1, 0x9a, 0xe5, 0x4e, 0x2f, 0xbe, + 0x27, 0x7d, 0xe9, 0x8d, 0x8c, 0xff, 0x0e, 0xf3, 0xfc, 0x67, 0xe3, 0xf3, 0x7f, 0x03, 0x00, 0x00, + 0xff, 0xff, 0x21, 0x1a, 0x13, 0x1a, 0x7b, 0x0d, 0x00, 0x00, +} diff --git a/api/api.proto b/api/api.proto new file mode 100644 index 000000000..c60a5180f --- /dev/null +++ b/api/api.proto @@ -0,0 +1,260 @@ +syntax = "proto3"; + +import "google/protobuf/timestamp.proto"; + +package openstorage.api; + +option go_package = "api"; +option java_multiple_files = true; +option java_package = "com.openstorage.api"; + +// CHANGE: no NONE, was 1 << iota +enum Status { + STATUS_NONE = 0; + STATUS_INIT = 1; + STATUS_OK = 2; + STATUS_OFFLINE = 3; + STATUS_ERROR = 4; +} + +// CHANGE: no NONE, was 1 << iota +enum DriverType { + DRIVER_TYPE_NONE = 0; + DRIVER_TYPE_FILE = 1; + DRIVER_TYPE_BLOCK = 2; + DRIVER_TYPE_OBJECT = 3; + DRIVER_TYPE_CLUSTERED = 4; + DRIVER_TYPE_GRAPH = 5; +} + +// CHANGE: was Filesystem, no NONE, was a string +enum FSType { + FS_TYPE_NONE = 0; + FS_TYPE_BTRFS = 1; + FS_TYPE_EXT4 = 2; + FS_TYPE_FUSE = 3; + FS_TYPE_NFS = 4; + FS_TYPE_VFS = 5; + FS_TYPE_XFS = 6; + FS_TYPE_ZFS = 7; +} + 
+// CHANGE: was an int, 0 was modified, 1 was added, 2 was deleted +enum GraphDriverChangeType { + GRAPH_DRIVER_CHANGE_TYPE_NONE = 0; + GRAPH_DRIVER_CHANGE_TYPE_MODIFIED = 1; + GRAPH_DRIVER_CHANGE_TYPE_ADDED = 2; + GRAPH_DRIVER_CHANGE_TYPE_DELETED = 3; +} + +// CHANGE: no NONE +enum VolumeActionParam { + VOLUME_ACTION_PARAM_NONE = 0; + // Maps to the boolean value false + VOLUME_ACTION_PARAM_OFF = 1; + // Maps to the boolean value true. + VOLUME_ACTION_PARAM_ON = 2; +} + +// VolumeState represents the state of a volume. +// CHANGE: no NONE, was 1 << iota (and was bit or'ed/and'ed) +enum VolumeState { + VOLUME_STATE_NONE = 0; + // Volume is transitioning to new state + VOLUME_STATE_PENDING = 1; + // Volume is ready to be assigned to a container + VOLUME_STATE_AVAILABLE = 2; + // Volume is attached to container + VOLUME_STATE_ATTACHED = 3; + // Volume is detached but associated with a container + VOLUME_STATE_DETACHED = 4; + // Volume detach is in progress + VOLUME_STATE_DETATCHING = 5; + // Volume is in error state + VOLUME_STATE_ERROR = 6; + // Volume is deleted, it will remain in this state + // while resources are asynchronously reclaimed + VOLUME_STATE_DELETED = 7; +} + +// VolumeStatus represents a health status for a volume. +// CHANGE: no NONE, was a string +enum VolumeStatus { + VOLUME_STATUS_NONE = 0; + // Volume is not present + VOLUME_STATUS_NOT_PRESENT = 1; + // Volume is healthy + VOLUME_STATUS_UP = 2; + // Volume is in fail mode + VOLUME_STATUS_DOWN = 3; + // Volume is up but with degraded performance + // In a RAID group, this may indicate a problem with one or more drives + VOLUME_STATUS_DEGRADED = 4; +} + +// VolumeLocator is a structure that is attached to a volume +// and is used to carry opaque metadata. +message VolumeLocator { + // User friendly identifier + string name = 1; + // A set of name-value pairs that acts as search filters + map volume_labels = 2; +} + +message Source { + // A volume id, if specified will create a clone of the parent. 
+ string parent = 1; + // Seed will seed the volume from the specified URI + // Any additional config for the source comes from the labels in the spec + string seed = 2; +} + +// VolumeSpec has the properties needed to create a volume. +message VolumeSpec { + // Ephemeral storage + bool ephemeral = 1; + // Thin provisioned volume size in bytes + uint64 size = 2; + // Format disk with this FSType + FSType format = 3; + // Block size for filesystem + int64 block_size = 4; + // Specifies the number of nodes that are + // allowed to fail, and yet data is available + // A value of 0 implies that data is not erasure coded, + // a failure of a node will lead to data loss + int64 ha_level = 5; + // The COS, 1 to 9 + uint32 cos = 6; + // Perform dedupe on this disk + bool dedupe = 7; + // SnapshotInterval in minutes, set to 0 to disable snapshots + uint32 snapshot_interval = 8; + // Volume configuration labels + map config_labels = 9; +} + +// Volume represents a live, created volume. +message Volume { + // Self referential volume ID + string id = 1; + Source source = 2; + bool readonly = 3; + // User specified locator + VolumeLocator locator = 4; + // Volume creation time + google.protobuf.Timestamp ctime = 5; + // User specified VolumeSpec + VolumeSpec spec = 6; + // Volume usage + uint64 usage = 7; + // Time when an integrity check for run + google.protobuf.Timestamp last_scan = 8; + // Format FSType type if any + FSType format = 9; + VolumeStatus status = 10; + VolumeState state = 11; + // Machine ID (node) on which this volume is attached + // Machine ID is a node instance identifier for clustered systems. 
+ string attached_on = 12; + string device_path = 14; + string attach_path = 15; + // Set of machine IDs (nodes) to which this volume is erasure coded - for clustered storage arrays + repeated string replica_set = 16; + // Last recorded error + string error = 17; +} + +message Stats { + // Reads completed successfully + int64 reads = 1; + // Time spent in reads in ms + int64 read_ms = 2; + int64 read_bytes = 3; + // Writes completed successfully + int64 writes = 4; + // Time spent in writes in ms + int64 write_ms = 5; + int64 write_bytes = 6; + // IOs curently in progress + int64 io_progress = 7; + // Time spent doing IOs ms + int64 io_ms = 8; +} + +// TODO: what? +message Alerts { +} + +message VolumeCreateRequest { + // User specified volume name and labels + VolumeLocator locator = 1; + // Source to create volume + Source source = 2; + // The storage spec for the volume + VolumeSpec spec = 3; +} + +message VolumeResponse { + string error = 1; +} + +// CHANGE: error was embedded VolumeResponse +message VolumeCreateResponse { + // ID of the newly created volume + string id = 1; + VolumeResponse volume_response = 2; +} + +// VolumeStateAction specifies desired actions. +message VolumeStateAction { + // Attach or Detach volume + VolumeActionParam attach = 1; + // Mount or unmount volume + VolumeActionParam mount = 2; + string mount_path = 3; + // Device path returned in attach + string device_path = 4; +} + +message VolumeSetRequest { + // User specified volume name and labels + VolumeLocator locator = 1; + // The storage spec for the volume + VolumeSpec spec = 2; + // State modification on this volume. 
+ VolumeStateAction action = 3; +} + +// CHANGE: error was embedded VolumeResponse +message VolumeSetResponse { + Volume volume = 1; + VolumeResponse volume_response = 2; +} + +message SnapCreateRequest { + // volume id + string id = 1; + VolumeLocator locator = 2; + bool readonly = 3; +} + +message SnapCreateResponse { + VolumeCreateResponse volume_create_response = 1; +} + +message VolumeInfo { + string volume_id = 1; + string path = 2; + VolumeSpec storage = 3; +} + +// GraphDriverChanges represent a list of changes between the filesystem layers +// specified by the ID and Parent. // Parent may be an empty string, in which +// case there is no parent. +// Where the Path is the filesystem path within the layered filesystem +// CHANGE: kind was an int +message GraphDriverChanges { + string path = 1; + GraphDriverChangeType kind = 2; +} diff --git a/api/client/client_test.go b/api/client/client_test.go index 35773d7f8..982ddbc2b 100644 --- a/api/client/client_test.go +++ b/api/client/client_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "go.pedge.io/dlog" + "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/api/server" "github.com/libopenstorage/openstorage/config" @@ -17,13 +19,17 @@ var ( testPath = string("/tmp/openstorage_client_test") ) +func init() { + dlog.SetLevel(dlog.LevelDebug) +} + func makeRequest(t *testing.T) { c, err := NewDriverClient(nfs.Name) if err != nil { t.Fatalf("Failed to create client: %v", err) } d := c.VolumeDriver() - _, err = d.Inspect([]api.VolumeID{api.VolumeID("foo")}) + _, err = d.Inspect([]string{"foo"}) if err != nil { t.Fatalf("Failed to create client: %v", err) } @@ -48,7 +54,7 @@ func TestAll(t *testing.T) { } d := c.VolumeDriver() ctx := test.NewContext(d) - ctx.Filesystem = string("btrfs") + ctx.Filesystem = api.FSType_FS_TYPE_BTRFS test.Run(t, ctx) } diff --git a/api/client/request.go b/api/client/request.go index 3da5c133d..45a7f6854 100644 --- 
a/api/client/request.go +++ b/api/client/request.go @@ -239,51 +239,37 @@ func (r *Request) Do() *Response { resp *http.Response url string body []byte - response *Response ) - if r.err != nil { - err = r.err - goto done + return &Response{err: r.err} } - url = r.URL().String() req, err = http.NewRequest(r.verb, url, bytes.NewBuffer(r.body)) if err != nil { - goto done + return &Response{err: err} } if r.headers == nil { r.headers = http.Header{} } - req.Header = r.headers req.Header.Set("Content-Type", "application/json") resp, err = r.client.Do(req) if err != nil { - goto done + return &Response{err: err} } - if resp.Body != nil { defer resp.Body.Close() body, err = ioutil.ReadAll(resp.Body) } - if err != nil { - goto done + return &Response{err: err} } - - response = &Response{ + return &Response{ status: resp.Status, statusCode: resp.StatusCode, body: body, err: parseHTTPStatus(resp, body), } - -done: - if err != nil { - return &Response{err: err} - } - return response } // Body return http body, valid only if there is no error diff --git a/api/client/volume.go b/api/client/volume.go index 7a15519c4..e318f57c4 100644 --- a/api/client/volume.go +++ b/api/client/volume.go @@ -11,6 +11,12 @@ import ( "github.com/libopenstorage/openstorage/volume" ) +const ( + graphPath = "/graph" + volumePath = "/volumes" + snapPath = "/snapshot" +) + type volumeClient struct { *volume.IoNotSupported c *Client @@ -27,102 +33,79 @@ func (v *volumeClient) String() string { func (v *volumeClient) Type() api.DriverType { // Block drivers implement the superset. 
- return api.Block + return api.DriverType_DRIVER_TYPE_BLOCK } -const ( - graphPath = "/graph" - volumePath = "/volumes" - snapPath = "/snapshot" -) - -func (v *volumeClient) GraphDriverCreate(id, parent string) error { - resp := "" - err := v.c.Put().Resource(graphPath + "/create").Instance(id).Do().Unmarshal(&resp) - if err != nil { +func (v *volumeClient) GraphDriverCreate(id string, parent string) error { + response := "" + if err := v.c.Put().Resource(graphPath + "/create").Instance(id).Do().Unmarshal(&response); err != nil { return err } - - if resp != id { - return fmt.Errorf("Invalid response: %v", resp) + if response != id { + return fmt.Errorf("Invalid response: %s", response) } - return nil } func (v *volumeClient) GraphDriverRemove(id string) error { - resp := "" - err := v.c.Put().Resource(graphPath + "/remove").Instance(id).Do().Unmarshal(&resp) - if err != nil { + response := "" + if err := v.c.Put().Resource(graphPath + "/remove").Instance(id).Do().Unmarshal(&response); err != nil { return err } - - if resp != id { - return fmt.Errorf("Invalid response: %v", resp) + if response != id { + return fmt.Errorf("Invalid response: %s", response) } - return nil } -func (v *volumeClient) GraphDriverGet(id, mountLabel string) (string, error) { - resp := "" - err := v.c.Get().Resource(graphPath + "/inspect").Instance(id).Do().Unmarshal(&resp) - if err != nil { +func (v *volumeClient) GraphDriverGet(id string, mountLabel string) (string, error) { + response := "" + if err := v.c.Get().Resource(graphPath + "/inspect").Instance(id).Do().Unmarshal(&response); err != nil { return "", err } - - return resp, nil + return response, nil } func (v *volumeClient) GraphDriverRelease(id string) error { - resp := "" - err := v.c.Put().Resource(graphPath + "/release").Instance(id).Do().Unmarshal(&resp) - if err != nil { + response := "" + if err := v.c.Put().Resource(graphPath + "/release").Instance(id).Do().Unmarshal(&response); err != nil { return err } - - if resp != id { 
- return fmt.Errorf("Invalid response: %v", resp) + if response != id { + return fmt.Errorf("Invalid response: %v", response) } - return nil } func (v *volumeClient) GraphDriverExists(id string) bool { - resp := false - v.c.Get().Resource(graphPath + "/exists").Instance(id).Do().Unmarshal(&resp) - return resp + response := false + v.c.Get().Resource(graphPath + "/exists").Instance(id).Do().Unmarshal(&response) + return response } -func (v *volumeClient) GraphDriverDiff(id, parent string) io.Writer { - path := graphPath + "/diff?id=" + id + "&parent=" + parent - resp := v.c.Get().Resource(path).Do() - return bytes.NewBuffer(resp.body) +func (v *volumeClient) GraphDriverDiff(id string, parent string) io.Writer { + return bytes.NewBuffer(v.c.Get().Resource(graphPath + "/diff?id=" + id + "&parent=" + parent).Do().body) } -func (v *volumeClient) GraphDriverChanges(id, parent string) ([]api.GraphDriverChanges, error) { +func (v *volumeClient) GraphDriverChanges(id string, parent string) ([]api.GraphDriverChanges, error) { var changes []api.GraphDriverChanges err := v.c.Get().Resource(graphPath + "/changes").Instance(id).Do().Unmarshal(&changes) return changes, err } -func (v *volumeClient) GraphDriverApplyDiff(id, parent string, diff io.Reader) (int, error) { - resp := 0 - path := graphPath + "/diff?id=" + id + "&parent=" + parent - +func (v *volumeClient) GraphDriverApplyDiff(id string, parent string, diff io.Reader) (int, error) { b, err := ioutil.ReadAll(diff) if err != nil { return 0, err } - - err = v.c.Put().Resource(path).Instance(id).Body(b).Do().Unmarshal(&resp) - if err != nil { + response := 0 + if err = v.c.Put().Resource(graphPath + "/diff?id=" + id + "&parent=" + parent).Instance(id).Body(b).Do().Unmarshal(&response); err != nil { return 0, err } - return resp, nil + return response, nil } -func (v *volumeClient) GraphDriverDiffSize(id, parent string) (int, error) { +func (v *volumeClient) GraphDriverDiffSize(id string, parent string) (int, error) { size := 
0 err := v.c.Get().Resource(graphPath + "/diffsize").Instance(id).Do().Unmarshal(&size) return size, err @@ -130,24 +113,24 @@ func (v *volumeClient) GraphDriverDiffSize(id, parent string) (int, error) { // Create a new Vol for the specific volume spev.c. // It returns a system generated VolumeID that uniquely identifies the volume -func (v *volumeClient) Create(locator api.VolumeLocator, +func (v *volumeClient) Create( + locator *api.VolumeLocator, source *api.Source, - spec *api.VolumeSpec) (api.VolumeID, error) { - - var response api.VolumeCreateResponse - createReq := api.VolumeCreateRequest{ + spec *api.VolumeSpec, +) (string, error) { + response := &api.VolumeCreateResponse{} + request := &api.VolumeCreateRequest{ Locator: locator, Source: source, Spec: spec, } - err := v.c.Post().Resource(volumePath).Body(&createReq).Do().Unmarshal(&response) - if err != nil { - return api.VolumeID(""), err + if err := v.c.Post().Resource(volumePath).Body(request).Do().Unmarshal(response); err != nil { + return "", err } - if response.Error != "" { - return api.VolumeID(""), errors.New(response.Error) + if response.VolumeResponse != nil && response.VolumeResponse.Error != "" { + return "", errors.New(response.VolumeResponse.Error) } - return response.ID, nil + return response.Id, nil } // Status diagnostic information @@ -157,31 +140,26 @@ func (v *volumeClient) Status() [][2]string { // Inspect specified volumes. // Errors ErrEnoEnt may be returned. 
-func (v *volumeClient) Inspect(ids []api.VolumeID) ([]api.Volume, error) { - var vols []api.Volume - +func (v *volumeClient) Inspect(ids []string) ([]*api.Volume, error) { if len(ids) == 0 { return nil, nil } - req := v.c.Get().Resource(volumePath) - - for _, v := range ids { - req.QueryOption(string(api.OptVolumeID), string(v)) + var volumes []*api.Volume + request := v.c.Get().Resource(volumePath) + for _, id := range ids { + request.QueryOption(api.OptVolumeID, id) } - err := req.Do().Unmarshal(&vols) - if err != nil { + if err := request.Do().Unmarshal(&volumes); err != nil { return nil, err } - return vols, nil + return volumes, nil } // Delete volume. // Errors ErrEnoEnt, ErrVolHasSnaps may be returned. -func (v *volumeClient) Delete(volumeID api.VolumeID) error { - var response api.VolumeResponse - - err := v.c.Delete().Resource(volumePath).Instance(string(volumeID)).Do().Unmarshal(&response) - if err != nil { +func (v *volumeClient) Delete(volumeID string) error { + response := &api.VolumeResponse{} + if err := v.c.Delete().Resource(volumePath).Instance(volumeID).Do().Unmarshal(response); err != nil { return err } if response.Error != "" { @@ -193,173 +171,171 @@ func (v *volumeClient) Delete(volumeID api.VolumeID) error { // Snap specified volume. IO to the underlying volume should be quiesced before // calling this function. 
// Errors ErrEnoEnt may be returned -func (v *volumeClient) Snapshot(volumeID api.VolumeID, readonly bool, locator api.VolumeLocator) (api.VolumeID, error) { - var response api.SnapCreateResponse - createReq := api.SnapCreateRequest{ - ID: volumeID, +func (v *volumeClient) Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator) (string, error) { + response := &api.SnapCreateResponse{} + request := &api.SnapCreateRequest{ + Id: volumeID, Readonly: readonly, Locator: locator, } - err := v.c.Post().Resource(snapPath).Body(&createReq).Do().Unmarshal(&response) - if err != nil { - return api.BadVolumeID, err + if err := v.c.Post().Resource(snapPath).Body(request).Do().Unmarshal(response); err != nil { + return "", err } - if response.Error != "" { - return api.BadVolumeID, errors.New(response.Error) + // TODO(pedge): this probably should not be embedded in this way + if response.VolumeCreateResponse != nil && response.VolumeCreateResponse.VolumeResponse != nil && response.VolumeCreateResponse.VolumeResponse.Error != "" { + return "", errors.New(response.VolumeCreateResponse.VolumeResponse.Error) + } + if response.VolumeCreateResponse != nil { + return response.VolumeCreateResponse.Id, nil } - return response.ID, nil + return "", nil } // Stats for specified volume. // Errors ErrEnoEnt may be returned -func (v *volumeClient) Stats(volumeID api.VolumeID) (api.Stats, error) { - var stats api.Stats - err := v.c.Get().Resource(volumePath + "/stats").Instance(string(volumeID)).Do().Unmarshal(&stats) - if err != nil { - return api.Stats{}, err +func (v *volumeClient) Stats(volumeID string) (*api.Stats, error) { + stats := &api.Stats{} + if err := v.c.Get().Resource(volumePath + "/stats").Instance(volumeID).Do().Unmarshal(stats); err != nil { + return nil, err } return stats, nil } // Alerts on this volume. 
// Errors ErrEnoEnt may be returned -func (v *volumeClient) Alerts(volumeID api.VolumeID) (api.Alerts, error) { - var alerts api.Alerts - err := v.c.Get().Resource(volumePath + "/alerts").Instance(string(volumeID)).Do().Unmarshal(&alerts) - if err != nil { - return api.Alerts{}, err +func (v *volumeClient) Alerts(volumeID string) (*api.Alerts, error) { + alerts := &api.Alerts{} + if err := v.c.Get().Resource(volumePath + "/alerts").Instance(volumeID).Do().Unmarshal(alerts); err != nil { + return nil, err } return alerts, nil } // Shutdown and cleanup. -func (v *volumeClient) Shutdown() { - return -} +func (v *volumeClient) Shutdown() {} // Enumerate volumes that map to the volumeLocator. Locator fields may be regexp. // If locator fields are left blank, this will return all volumes. -func (v *volumeClient) Enumerate(locator api.VolumeLocator, labels api.Labels) ([]api.Volume, error) { - var vols []api.Volume +func (v *volumeClient) Enumerate(locator *api.VolumeLocator, labels map[string]string) ([]*api.Volume, error) { + var volumes []*api.Volume req := v.c.Get().Resource(volumePath) if locator.Name != "" { - req.QueryOption(string(api.OptName), locator.Name) + req.QueryOption(api.OptName, locator.Name) } if len(locator.VolumeLabels) != 0 { - req.QueryOptionLabel(string(api.OptLabel), locator.VolumeLabels) + req.QueryOptionLabel(api.OptLabel, locator.VolumeLabels) } if len(labels) != 0 { - req.QueryOptionLabel(string(api.OptConfigLabel), labels) + req.QueryOptionLabel(api.OptConfigLabel, labels) } - err := req.Do().Unmarshal(&vols) - if err != nil { + if err := req.Do().Unmarshal(&volumes); err != nil { return nil, err } - return vols, nil + return volumes, nil } // Enumerate snaps for specified volume // Count indicates the number of snaps populated.
-func (v *volumeClient) SnapEnumerate(ids []api.VolumeID, snapLabels api.Labels) ([]api.Volume, error) { - var snaps []api.Volume - - req := v.c.Get().Resource(snapPath) - for _, v := range ids { - req.QueryOption(string(api.OptVolumeID), string(v)) +func (v *volumeClient) SnapEnumerate(ids []string, snapLabels map[string]string) ([]*api.Volume, error) { + var volumes []*api.Volume + request := v.c.Get().Resource(snapPath) + for _, id := range ids { + request.QueryOption(api.OptVolumeID, id) } if len(snapLabels) != 0 { - req.QueryOptionLabel(string(api.OptConfigLabel), snapLabels) + request.QueryOptionLabel(api.OptConfigLabel, snapLabels) } - err := req.Do().Unmarshal(&snaps) - if err != nil { + if err := request.Do().Unmarshal(&volumes); err != nil { return nil, err } - return snaps, nil + return volumes, nil } // Attach map device to the host. // On success the devicePath specifies location where the device is exported // Errors ErrEnoEnt, ErrVolAttached may be returned. -func (v *volumeClient) Attach(volumeID api.VolumeID) (string, error) { - var response api.VolumeSetResponse - - req := api.VolumeSetRequest{ - Action: &api.VolumeStateAction{Attach: api.ParamOn}, - } - err := v.c.Put().Resource(volumePath).Instance(string(volumeID)).Body(&req).Do().Unmarshal(&response) +func (v *volumeClient) Attach(volumeID string) (string, error) { + response, err := v.doVolumeSetGetResponse( + volumeID, + &api.VolumeSetRequest{ + Action: &api.VolumeStateAction{ + Attach: api.VolumeActionParam_VOLUME_ACTION_PARAM_ON, + }, + }, + ) if err != nil { return "", err } - if response.VolumeResponse.Error != "" { - return "", errors.New(response.VolumeResponse.Error) + if response.Volume != nil { + return response.Volume.DevicePath, nil } - return response.DevicePath, nil + return "", nil } // Detach device from the host. // Errors ErrEnoEnt, ErrVolDetached may be returned. 
-func (v *volumeClient) Detach(volumeID api.VolumeID) error { - var response api.VolumeSetResponse - req := api.VolumeSetRequest{ - Action: &api.VolumeStateAction{Attach: api.ParamOff}, - } - err := v.c.Put().Resource(volumePath).Instance(string(volumeID)).Body(&req).Do().Unmarshal(&response) - if err != nil { - return err - } - if response.VolumeResponse.Error != "" { - return errors.New(response.VolumeResponse.Error) - } - return nil +func (v *volumeClient) Detach(volumeID string) error { + return v.doVolumeSet( + volumeID, + &api.VolumeSetRequest{ + Action: &api.VolumeStateAction{ + Attach: api.VolumeActionParam_VOLUME_ACTION_PARAM_OFF, + }, + }, + ) } // Mount volume at specified path // Errors ErrEnoEnt, ErrVolDetached may be returned. -func (v *volumeClient) Mount(volumeID api.VolumeID, mountpath string) error { - var response api.VolumeSetResponse - req := api.VolumeSetRequest{ - Action: &api.VolumeStateAction{Mount: api.ParamOn, MountPath: mountpath}, - } - err := v.c.Put().Resource(volumePath).Instance(string(volumeID)).Body(&req).Do().Unmarshal(&response) - if err != nil { - return err - } - if response.VolumeResponse.Error != "" { - return errors.New(response.VolumeResponse.Error) - } - return nil +func (v *volumeClient) Mount(volumeID string, mountPath string) error { + return v.doVolumeSet( + volumeID, + &api.VolumeSetRequest{ + Action: &api.VolumeStateAction{ + Mount: api.VolumeActionParam_VOLUME_ACTION_PARAM_ON, + MountPath: mountPath, + }, + }, + ) } // Unmount volume at specified path // Errors ErrEnoEnt, ErrVolDetached may be returned. 
-func (v *volumeClient) Unmount(volumeID api.VolumeID, mountpath string) error { - var response api.VolumeSetResponse - req := api.VolumeSetRequest{ - Action: &api.VolumeStateAction{Mount: api.ParamOff, MountPath: mountpath}, - } - err := v.c.Put().Resource(volumePath).Instance(string(volumeID)).Body(&req).Do().Unmarshal(&response) - if err != nil { - return err - } - if response.VolumeResponse.Error != "" { - return errors.New(response.VolumeResponse.Error) - } - return nil +func (v *volumeClient) Unmount(volumeID string, mountPath string) error { + return v.doVolumeSet( + volumeID, + &api.VolumeSetRequest{ + Action: &api.VolumeStateAction{ + Mount: api.VolumeActionParam_VOLUME_ACTION_PARAM_OFF, + MountPath: mountPath, + }, + }, + ) } // Update volume -func (v *volumeClient) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error { - var response api.VolumeSetResponse - req := api.VolumeSetRequest{ - Locator: locator, - Spec: spec, - } - err := v.c.Put().Resource(volumePath).Instance(string(volumeID)).Body(&req).Do().Unmarshal(&response) - if err != nil { - return err +func (v *volumeClient) Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error { + return v.doVolumeSet( + volumeID, + &api.VolumeSetRequest{ + Locator: locator, + Spec: spec, + }, + ) +} + +func (v *volumeClient) doVolumeSet(volumeID string, request *api.VolumeSetRequest) error { + _, err := v.doVolumeSetGetResponse(volumeID, request) + return err +} + +func (v *volumeClient) doVolumeSetGetResponse(volumeID string, request *api.VolumeSetRequest) (*api.VolumeSetResponse, error) { + response := &api.VolumeSetResponse{} + if err := v.c.Put().Resource(volumePath).Instance(volumeID).Body(request).Do().Unmarshal(response); err != nil { + return nil, err } - if response.VolumeResponse.Error != "" { - return errors.New(response.VolumeResponse.Error) + if response.VolumeResponse != nil && response.VolumeResponse.Error != "" { + return nil, 
errors.New(response.VolumeResponse.Error) } - return nil + return response, nil } diff --git a/api/cluster.go b/api/cluster.go deleted file mode 100644 index c0b9d2bcf..000000000 --- a/api/cluster.go +++ /dev/null @@ -1,72 +0,0 @@ -package api - -import ( - "time" - - "github.com/fsouza/go-dockerclient" - "github.com/portworx/systemutils" -) - -type Status int - -const ( - StatusInit Status = 1 << iota - StatusOk - StatusOffline - StatusError -) - -type VolumeInfo struct { - Path string - Storage *VolumeSpec - VolumeID VolumeID -} - -// Node describes the state of a node. -// It includes the current physical state (CPU, memory, storage, network usage) as -// well as the containers running on the system. -type Node struct { - Id string - Cpu float64 // percentage. - Memory float64 // percentage. - Luns map[string]systemutils.Lun - Avgload int - Ip string - Timestamp time.Time - Status Status - Containers []docker.APIContainers - NodeData map[string]interface{} - GenNumber uint64 -} - -// Cluster represents the state of the cluster. -type Cluster struct { - Status Status - Id string - Nodes []Node -} - -// VolumeActionParam desired action on volume -type ClusterActionParam int - -// ClusterStateAction is the body of the REST request to specify desired actions -type ClusterStateAction struct { - // Remove a node or a set of nodes - Remove ClusterActionParam `json:"remove"` - - // Shutdown a node or a set of nodes - Shutdown ClusterActionParam `json:"shutdown"` -} - -// ClusterStateResponse is the body of the REST response -type ClusterStateResponse struct { - // VolumeStateRequest the current state of the volume - ClusterStateAction - ClusterResponse -} - -// VolumeResponse is embedded in all REST responses. -type ClusterResponse struct { - // Error is "" on success or contains the error message on failure. 
- Error string `json:"error"` -} diff --git a/api/graphdriver.go b/api/graphdriver.go deleted file mode 100644 index 46c572e6f..000000000 --- a/api/graphdriver.go +++ /dev/null @@ -1,14 +0,0 @@ -package api - -// GraphDriverChanges represent a list of changes between the filesystem layers -// specified by the ID and Parent. // Parent may be an empty string, in which -// case there is no parent. -// Where the Path is the filesystem path within the layered filesystem -// that is changed and Kind is an integer specifying the type of change that occurred: -// 0 - Modified -// 1 - Added -// 2 - Deleted -type GraphDriverChanges struct { - Path string // "/some/path" - Kind int -} diff --git a/api/server/cluster.go b/api/server/cluster.go index 0a7740301..9b9447652 100644 --- a/api/server/cluster.go +++ b/api/server/cluster.go @@ -5,11 +5,7 @@ import ( "net/http" "github.com/libopenstorage/openstorage/cluster" -) - -const ( - clusterApiVersion = "v1" - name = "Cluster API" + "github.com/libopenstorage/openstorage/config" ) type clusterApi struct { @@ -21,36 +17,45 @@ type clusterResponse struct { Version string } +func (c *clusterApi) Routes() []*Route { + return []*Route{ + &Route{verb: "GET", path: clusterPath("/enumerate"), fn: c.enumerate}, + &Route{verb: "GET", path: clusterPath("/status"), fn: c.status}, + &Route{verb: "GET", path: clusterPath("/inspect/{id}"), fn: c.inspect}, + &Route{verb: "DELETE", path: clusterPath(""), fn: c.delete}, + &Route{verb: "DELETE", path: clusterPath("/{id}"), fn: c.delete}, + &Route{verb: "PUT", path: clusterPath("/enablegossip"), fn: c.enableGossip}, + &Route{verb: "PUT", path: clusterPath("/disablegossip"), fn: c.disableGossip}, + &Route{verb: "PUT", path: clusterPath("/shutdown"), fn: c.shutdown}, + &Route{verb: "PUT", path: clusterPath("/shutdown/{id}"), fn: c.shutdown}, + } +} func newClusterAPI() restServer { - return &clusterApi{restBase{version: clusterApiVersion, name: name}} + return 
&clusterApi{restBase{version: config.Version, name: "Cluster API"}} } func (c *clusterApi) String() string { - return name + return c.name } func (c *clusterApi) enumerate(w http.ResponseWriter, r *http.Request) { method := "enumerate" - inst, err := cluster.Inst() if err != nil { - c.sendError(name, method, w, err.Error(), http.StatusInternalServerError) + c.sendError(c.name, method, w, err.Error(), http.StatusInternalServerError) return } - cluster, err := inst.Enumerate() if err != nil { - c.sendError(name, method, w, err.Error(), http.StatusInternalServerError) + c.sendError(c.name, method, w, err.Error(), http.StatusInternalServerError) return } - json.NewEncoder(w).Encode(cluster) } func (c *clusterApi) inspect(w http.ResponseWriter, r *http.Request) { method := "inspect" - - c.sendError(name, method, w, "Not implemented.", http.StatusNotImplemented) + c.sendNotImplemented(w, method) } func (c *clusterApi) enableGossip(w http.ResponseWriter, r *http.Request) { @@ -58,7 +63,7 @@ func (c *clusterApi) enableGossip(w http.ResponseWriter, r *http.Request) { inst, err := cluster.Inst() if err != nil { - c.sendError(name, method, w, err.Error(), http.StatusInternalServerError) + c.sendError(c.name, method, w, err.Error(), http.StatusInternalServerError) return } @@ -66,7 +71,7 @@ func (c *clusterApi) enableGossip(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(&clusterResponse{ Status: "OK", - Version: clusterApiVersion, + Version: config.Version, }) } @@ -75,7 +80,7 @@ func (c *clusterApi) disableGossip(w http.ResponseWriter, r *http.Request) { inst, err := cluster.Inst() if err != nil { - c.sendError(name, method, w, err.Error(), http.StatusInternalServerError) + c.sendError(c.name, method, w, err.Error(), http.StatusInternalServerError) return } @@ -83,7 +88,7 @@ func (c *clusterApi) disableGossip(w http.ResponseWriter, r *http.Request) { json.NewEncoder(w).Encode(&clusterResponse{ Status: "OK", - Version: clusterApiVersion, + Version: 
config.Version, }) } @@ -92,7 +97,7 @@ func (c *clusterApi) status(w http.ResponseWriter, r *http.Request) { inst, err := cluster.Inst() if err != nil { - c.sendError(name, method, w, err.Error(), http.StatusInternalServerError) + c.sendError(c.name, method, w, err.Error(), http.StatusInternalServerError) return } @@ -103,34 +108,22 @@ func (c *clusterApi) status(w http.ResponseWriter, r *http.Request) { func (c *clusterApi) delete(w http.ResponseWriter, r *http.Request) { method := "delete" - - c.sendError(name, method, w, "Not implemented.", http.StatusNotImplemented) + c.sendNotImplemented(w, method) } func (c *clusterApi) shutdown(w http.ResponseWriter, r *http.Request) { method := "shutdown" + c.sendNotImplemented(w, method) +} - c.sendError(name, method, w, "Not implemented.", http.StatusNotImplemented) +func (c *clusterApi) sendNotImplemented(w http.ResponseWriter, method string) { + c.sendError(c.name, method, w, "Not implemented.", http.StatusNotImplemented) } func clusterVersion(route string) string { - return "/" + volApiVersion + "/" + route + return "/" + config.Version + "/" + route } func clusterPath(route string) string { return clusterVersion("cluster" + route) } - -func (c *clusterApi) Routes() []*Route { - return []*Route{ - &Route{verb: "GET", path: clusterPath("/enumerate"), fn: c.enumerate}, - &Route{verb: "GET", path: clusterPath("/status"), fn: c.status}, - &Route{verb: "GET", path: clusterPath("/inspect/{id}"), fn: c.inspect}, - &Route{verb: "DELETE", path: clusterPath(""), fn: c.delete}, - &Route{verb: "DELETE", path: clusterPath("/{id}"), fn: c.delete}, - &Route{verb: "PUT", path: clusterPath("/enablegossip"), fn: c.enableGossip}, - &Route{verb: "PUT", path: clusterPath("/disablegossip"), fn: c.disableGossip}, - &Route{verb: "PUT", path: clusterPath("/shutdown"), fn: c.shutdown}, - &Route{verb: "PUT", path: clusterPath("/shutdown/{id}"), fn: c.shutdown}, - } -} diff --git a/api/server/docker.go b/api/server/docker.go index 
c46dd6e10..6baffd47c 100644 --- a/api/server/docker.go +++ b/api/server/docker.go @@ -61,13 +61,13 @@ func volDriverPath(method string) string { func (d *driver) volNotFound(request string, id string, e error, w http.ResponseWriter) error { err := fmt.Errorf("Failed to locate volume: " + e.Error()) - d.logReq(request, id).Warn(http.StatusNotFound, " ", err.Error()) + d.logRequest(request, id).Warnln(http.StatusNotFound, " ", err.Error()) return err } func (d *driver) volNotMounted(request string, id string) error { err := fmt.Errorf("volume not mounted") - d.logReq(request, id).Debug(http.StatusNotFound, " ", err.Error()) + d.logRequest(request, id).Debugln(http.StatusNotFound, " ", err.Error()) return err } @@ -98,13 +98,13 @@ func (d *driver) volFromName(name string) (*api.Volume, error) { if err != nil { return nil, fmt.Errorf("Cannot locate volume driver for %s: %s", d.name, err.Error()) } - vols, err := v.Inspect([]api.VolumeID{api.VolumeID(name)}) + vols, err := v.Inspect([]string{name}) if err == nil && len(vols) == 1 { - return &vols[0], nil + return vols[0], nil } - vols, err = v.Enumerate(api.VolumeLocator{Name: name}, nil) + vols, err = v.Enumerate(&api.VolumeLocator{Name: name}, nil) if err == nil && len(vols) == 1 { - return &vols[0], nil + return vols[0], nil } return nil, fmt.Errorf("Cannot locate volume %s", name) } @@ -117,7 +117,7 @@ func (d *driver) decode(method string, w http.ResponseWriter, r *http.Request) ( d.sendError(method, "", w, e.Error()+":"+err.Error(), http.StatusBadRequest) return nil, e } - d.logReq(method, request.Name).Debug("") + d.logRequest(method, request.Name).Debugln("") return &request, nil } @@ -129,7 +129,7 @@ func (d *driver) handshake(w http.ResponseWriter, r *http.Request) { d.sendError("handshake", "", w, "encode error", http.StatusInternalServerError) return } - d.logReq("handshake", "").Debug("Handshake completed") + d.logRequest("handshake", "").Debugln("Handshake completed") } func (d *driver) status(w 
http.ResponseWriter, r *http.Request) { @@ -145,72 +145,62 @@ func (d *driver) specFromOpts(Opts map[string]string) *api.VolumeSpec { case api.SpecSize: spec.Size, _ = strconv.ParseUint(v, 10, 64) case api.SpecFilesystem: - spec.Format = api.Filesystem(v) + value, _ := api.FSTypeSimpleValueOf(v) + spec.Format = value case api.SpecBlockSize: blockSize, _ := strconv.ParseInt(v, 10, 64) - spec.BlockSize = int(blockSize) + spec.BlockSize = blockSize case api.SpecHaLevel: haLevel, _ := strconv.ParseInt(v, 10, 64) - spec.HALevel = int(haLevel) + spec.HaLevel = haLevel case api.SpecCos: - cos, _ := strconv.ParseInt(v, 10, 64) - spec.Cos = api.VolumeCos(cos) + value, _ := strconv.ParseUint(v, 10, 32) + spec.Cos = uint32(value) case api.SpecDedupe: spec.Dedupe, _ = strconv.ParseBool(v) case api.SpecSnapshotInterval: - snapshotInterval, _ := strconv.ParseInt(v, 10, 64) - spec.SnapshotInterval = int(snapshotInterval) + snapshotInterval, _ := strconv.ParseUint(v, 10, 32) + spec.SnapshotInterval = uint32(snapshotInterval) } } return &spec } func (d *driver) create(w http.ResponseWriter, r *http.Request) { - var err error method := "create" - request, err := d.decode(method, w, r) if err != nil { return } - d.logReq(method, request.Name).Info("") - - _, err = d.volFromName(request.Name) - if err != nil { + d.logRequest(method, request.Name).Infoln("") + if _, err = d.volFromName(request.Name); err != nil { v, err := volume.Get(d.name) if err != nil { d.errorResponse(w, err) return } spec := d.specFromOpts(request.Opts) - _, err = v.Create(api.VolumeLocator{Name: request.Name}, nil, spec) - if err != nil { + if _, err := v.Create(&api.VolumeLocator{Name: request.Name}, nil, spec); err != nil { d.errorResponse(w, err) return } } - json.NewEncoder(w).Encode(&volumeResponse{}) } func (d *driver) remove(w http.ResponseWriter, r *http.Request) { method := "remove" - request, err := d.decode(method, w, r) if err != nil { return } - - d.logReq(method, request.Name).Info("") - + 
d.logRequest(method, request.Name).Infoln("") // It is an error if the volume doesn't exist. - _, err = d.volFromName(request.Name) - if err != nil { + if _, err := d.volFromName(request.Name); err != nil { e := d.volNotFound(method, request.Name, err, w) d.errorResponse(w, e) return } - json.NewEncoder(w).Encode(&volumeResponse{}) } @@ -220,7 +210,7 @@ func (d *driver) mount(w http.ResponseWriter, r *http.Request) { v, err := volume.Get(d.name) if err != nil { - d.logReq(method, "").Warn("Cannot locate volume driver") + d.logRequest(method, "").Warnf("Cannot locate volume driver") d.errorResponse(w, err) return } @@ -231,7 +221,7 @@ func (d *driver) mount(w http.ResponseWriter, r *http.Request) { return } - d.logReq(method, request.Name).Debug("") + d.logRequest(method, request.Name).Debugln("") vol, err := d.volFromName(request.Name) if err != nil { @@ -240,23 +230,23 @@ func (d *driver) mount(w http.ResponseWriter, r *http.Request) { } // If this is a block driver, first attach the volume. - if v.Type()&api.Block != 0 { - attachPath, err := v.Attach(vol.ID) + if v.Type() == api.DriverType_DRIVER_TYPE_BLOCK { + attachPath, err := v.Attach(vol.Id) if err != nil { - d.logReq(method, request.Name).Warnf("Cannot attach volume: %v", err.Error()) + d.logRequest(method, request.Name).Warnf("Cannot attach volume: %v", err.Error()) d.errorResponse(w, err) return } - d.logReq(method, request.Name).Debugf("response %v", attachPath) + d.logRequest(method, request.Name).Debugf("response %v", attachPath) } // Now mount it. 
response.Mountpoint = path.Join(config.MountBase, request.Name) os.MkdirAll(response.Mountpoint, 0755) - err = v.Mount(vol.ID, response.Mountpoint) + err = v.Mount(vol.Id, response.Mountpoint) if err != nil { - d.logReq(method, request.Name).Warnf("Cannot mount volume %v, %v", + d.logRequest(method, request.Name).Warnf("Cannot mount volume %v, %v", response.Mountpoint, err) d.errorResponse(w, err) return @@ -264,7 +254,7 @@ func (d *driver) mount(w http.ResponseWriter, r *http.Request) { response.Mountpoint = path.Join(response.Mountpoint, config.DataDir) os.MkdirAll(response.Mountpoint, 0755) - d.logReq(method, request.Name).Infof("response %v", response.Mountpoint) + d.logRequest(method, request.Name).Infof("response %v", response.Mountpoint) json.NewEncoder(w).Encode(&response) } @@ -284,7 +274,7 @@ func (d *driver) path(w http.ResponseWriter, r *http.Request) { return } - d.logReq(method, request.Name).Debug("") + d.logRequest(method, request.Name).Debugf("") response.Mountpoint = vol.AttachPath if response.Mountpoint == "" { e := d.volNotMounted(method, request.Name) @@ -292,7 +282,7 @@ func (d *driver) path(w http.ResponseWriter, r *http.Request) { return } response.Mountpoint = path.Join(response.Mountpoint, config.DataDir) - d.logReq(method, request.Name).Debugf("response %v", response.Mountpoint) + d.logRequest(method, request.Name).Debugf("response %v", response.Mountpoint) json.NewEncoder(w).Encode(&response) } @@ -301,12 +291,12 @@ func (d *driver) list(w http.ResponseWriter, r *http.Request) { v, err := volume.Get(d.name) if err != nil { - d.logReq(method, "").Warnf("Cannot locate volume driver: %v", err.Error()) + d.logRequest(method, "").Warnf("Cannot locate volume driver: %v", err.Error()) d.errorResponse(w, err) return } - vols, err := v.Enumerate(api.VolumeLocator{}, nil) + vols, err := v.Enumerate(nil, nil) if err != nil { d.errorResponse(w, err) return @@ -349,7 +339,7 @@ func (d *driver) unmount(w http.ResponseWriter, r *http.Request) { v, err 
:= volume.Get(d.name) if err != nil { - d.logReq(method, "").Warnf("Cannot locate volume driver: %v", err.Error()) + d.logRequest(method, "").Warnf("Cannot locate volume driver: %v", err.Error()) d.errorResponse(w, err) return } @@ -359,7 +349,7 @@ func (d *driver) unmount(w http.ResponseWriter, r *http.Request) { return } - d.logReq(method, request.Name).Info("") + d.logRequest(method, request.Name).Infoln("") vol, err := d.volFromName(request.Name) if err != nil { @@ -369,16 +359,16 @@ func (d *driver) unmount(w http.ResponseWriter, r *http.Request) { } mountpoint := path.Join(config.MountBase, request.Name) - err = v.Unmount(vol.ID, mountpoint) + err = v.Unmount(vol.Id, mountpoint) if err != nil { - d.logReq(method, request.Name).Warnf("Cannot unmount volume %v, %v", + d.logRequest(method, request.Name).Warnf("Cannot unmount volume %v, %v", mountpoint, err) d.errorResponse(w, err) return } - if v.Type()&api.Block != 0 { - _ = v.Detach(vol.ID) + if v.Type() == api.DriverType_DRIVER_TYPE_BLOCK { + _ = v.Detach(vol.Id) } d.emptyResponse(w) } diff --git a/api/server/graphdriver.go b/api/server/graphdriver.go index 13ef9bc4b..28fcb3e51 100644 --- a/api/server/graphdriver.go +++ b/api/server/graphdriver.go @@ -76,7 +76,7 @@ func (d *graphDriver) emptyResponse(w http.ResponseWriter) { } func (d *graphDriver) errResponse(method string, w http.ResponseWriter, err error) { - d.logReq(method, "").Warnf("%v", err) + d.logRequest(method, "").Warnf("%v", err) fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, err.Error())) } @@ -93,9 +93,9 @@ func (d *graphDriver) decode(method string, w http.ResponseWriter, r *http.Reque return nil, err } if len(request.Parent) != 0 { - d.logReq(method, request.ID).Debug("Parent: ", request.Parent) + d.logRequest(method, request.ID).Debugln("Parent: ", request.Parent) } else { - d.logReq(method, request.ID).Debug("") + d.logRequest(method, request.ID).Debugln("") } return &request, nil } @@ -110,7 +110,7 @@ func (d *graphDriver) handshake(w 
http.ResponseWriter, r *http.Request) { d.sendError("handshake", "", w, "encode error", http.StatusInternalServerError) return } - d.logReq("handshake", "").Debug("Handshake completed") + d.logRequest("handshake", "").Debugln("Handshake completed") } func (d *graphDriver) init(w http.ResponseWriter, r *http.Request) { @@ -119,7 +119,7 @@ func (d *graphDriver) init(w http.ResponseWriter, r *http.Request) { Home string Opts []string } - d.logReq(method, request.Home).Info("") + d.logRequest(method, request.Home).Infoln("") if err := json.NewDecoder(r.Body).Decode(&request); err != nil { d.decodeError(method, w, err) return @@ -315,7 +315,7 @@ func (d *graphDriver) applyDiff(w http.ResponseWriter, r *http.Request) { id := r.URL.Query().Get("id") parent := r.URL.Query().Get("parent") - d.logReq(method, id).Debugf("Parent %v", parent) + d.logRequest(method, id).Debugf("Parent %v", parent) size, err := d.gd.ApplyDiff(id, parent, r.Body) if err != nil { d.errResponse(method, w, err) diff --git a/api/server/server.go b/api/server/server.go index 8e8bce7c5..293ffe00b 100644 --- a/api/server/server.go +++ b/api/server/server.go @@ -7,7 +7,8 @@ import ( "os" "path" - log "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/gorilla/mux" ) @@ -21,7 +22,7 @@ type Route struct { type restServer interface { Routes() []*Route String() string - logReq(request string, id string) *log.Entry + logRequest(request string, id string) dlog.Logger sendError(request string, id string, w http.ResponseWriter, msg string, code int) } @@ -31,20 +32,20 @@ type restBase struct { name string } -func (rest *restBase) logReq(request string, id string) *log.Entry { - return log.WithFields(log.Fields{ +func (rest *restBase) logRequest(request string, id string) dlog.Logger { + return dlog.WithFields(map[string]interface{}{ "Driver": rest.name, "Request": request, "ID": id, }) } func (rest *restBase) sendError(request string, id string, w http.ResponseWriter, msg string, code 
int) { - rest.logReq(request, id).Warn(code, " ", msg) + rest.logRequest(request, id).Warnln(code, " ", msg) http.Error(w, msg, code) } func notFound(w http.ResponseWriter, r *http.Request) { - log.Warnf("Not found: %+v ", r.URL) + dlog.Warnf("Not found: %+v ", r.URL) http.NotFound(w, r) } @@ -63,10 +64,10 @@ func startServer(name string, sockBase string, port int, routes []*Route) error os.Remove(socket) os.MkdirAll(path.Dir(socket), 0755) - log.Printf("Starting REST service on %+v", socket) + dlog.Printf("Starting REST service on %+v", socket) listener, err = net.Listen("unix", socket) if err != nil { - log.Warn("Cannot listen on UNIX socket: ", err) + dlog.Warnln("Cannot listen on UNIX socket: ", err) return err } go http.Serve(listener, router) diff --git a/api/server/volume.go b/api/server/volume.go index 26af3a780..dfdd3c823 100644 --- a/api/server/volume.go +++ b/api/server/volume.go @@ -8,13 +8,10 @@ import ( "github.com/gorilla/mux" "github.com/libopenstorage/openstorage/api" + "github.com/libopenstorage/openstorage/config" "github.com/libopenstorage/openstorage/volume" ) -const ( - volApiVersion = "v1" -) - type volApi struct { restBase } @@ -27,19 +24,19 @@ func responseStatus(err error) string { } func newVolumeAPI(name string) restServer { - return &volApi{restBase{version: volApiVersion, name: name}} + return &volApi{restBase{version: config.Version, name: name}} } func (vd *volApi) String() string { return vd.name } -func (vd *volApi) parseVolumeID(r *http.Request) (api.VolumeID, error) { +func (vd *volApi) parseVolumeID(r *http.Request) (string, error) { vars := mux.Vars(r) if id, ok := vars["id"]; ok { - return api.VolumeID(id), nil + return string(id), nil } - return api.BadVolumeID, fmt.Errorf("could not parse snap ID") + return "", fmt.Errorf("could not parse snap ID") } func (vd *volApi) create(w http.ResponseWriter, r *http.Request) { @@ -57,18 +54,18 @@ func (vd *volApi) create(w http.ResponseWriter, r 
*http.Request) { notFound(w, r) return } - ID, err := d.Create(dcReq.Locator, dcReq.Source, dcReq.Spec) - dcRes.VolumeResponse = api.VolumeResponse{Error: responseStatus(err)} - dcRes.ID = ID + id, err := d.Create(dcReq.Locator, dcReq.Source, dcReq.Spec) + dcRes.VolumeResponse = &api.VolumeResponse{Error: responseStatus(err)} + dcRes.Id = id - vd.logReq(method, string(ID)).Info("") + vd.logRequest(method, id).Infoln("") json.NewEncoder(w).Encode(&dcRes) } func (vd *volApi) volumeSet(w http.ResponseWriter, r *http.Request) { var ( - volumeID api.VolumeID + volumeID string err error req api.VolumeSetRequest resp api.VolumeSetResponse @@ -86,7 +83,7 @@ func (vd *volApi) volumeSet(w http.ResponseWriter, r *http.Request) { return } - vd.logReq(method, string(volumeID)).Info("") + vd.logRequest(method, string(volumeID)).Infoln("") d, err := volume.Get(vd.name) if err != nil { @@ -99,8 +96,8 @@ func (vd *volApi) volumeSet(w http.ResponseWriter, r *http.Request) { } for err == nil && req.Action != nil { - if req.Action.Attach != api.ParamIgnore { - if req.Action.Attach == api.ParamOn { + if req.Action.Attach != api.VolumeActionParam_VOLUME_ACTION_PARAM_NONE { + if req.Action.Attach == api.VolumeActionParam_VOLUME_ACTION_PARAM_ON { _, err = d.Attach(volumeID) } else { err = d.Detach(volumeID) @@ -109,8 +106,8 @@ func (vd *volApi) volumeSet(w http.ResponseWriter, r *http.Request) { break } } - if req.Action.Mount != api.ParamIgnore { - if req.Action.Mount == api.ParamOn { + if req.Action.Mount != api.VolumeActionParam_VOLUME_ACTION_PARAM_NONE { + if req.Action.Mount == api.VolumeActionParam_VOLUME_ACTION_PARAM_ON { if req.Action.MountPath == "" { err = fmt.Errorf("Invalid mount path") break @@ -127,16 +124,21 @@ func (vd *volApi) volumeSet(w http.ResponseWriter, r *http.Request) { } if err != nil { - resp.VolumeResponse.Error = err.Error() + resp.VolumeResponse = &api.VolumeResponse{ + Error: err.Error(), + } } else { - v, err := d.Inspect([]api.VolumeID{volumeID}) + v, err 
:= d.Inspect([]string{volumeID}) if err != nil || v == nil || len(v) != 1 { if err == nil { err = fmt.Errorf("Failed to inspect volume") } - resp.VolumeResponse.Error = err.Error() + resp.VolumeResponse = &api.VolumeResponse{ + Error: err.Error(), + } } else { - resp.Volume = v[0] + v0 := v[0] + resp.Volume = v0 } } json.NewEncoder(w).Encode(resp) @@ -144,7 +146,7 @@ func (vd *volApi) volumeSet(w http.ResponseWriter, r *http.Request) { func (vd *volApi) inspect(w http.ResponseWriter, r *http.Request) { var err error - var volumeID api.VolumeID + var volumeID string method := "inspect" d, err := volume.Get(vd.name) @@ -159,9 +161,9 @@ func (vd *volApi) inspect(w http.ResponseWriter, r *http.Request) { return } - vd.logReq(method, string(volumeID)).Info("") + vd.logRequest(method, string(volumeID)).Infoln("") - dk, err := d.Inspect([]api.VolumeID{volumeID}) + dk, err := d.Inspect([]string{volumeID}) if err != nil { vd.sendError(vd.name, method, w, err.Error(), http.StatusNotFound) return @@ -171,7 +173,7 @@ func (vd *volApi) inspect(w http.ResponseWriter, r *http.Request) { } func (vd *volApi) delete(w http.ResponseWriter, r *http.Request) { - var volumeID api.VolumeID + var volumeID string var err error method := "delete" @@ -181,7 +183,7 @@ func (vd *volApi) delete(w http.ResponseWriter, r *http.Request) { return } - vd.logReq(method, string(volumeID)).Info("") + vd.logRequest(method, volumeID).Infoln("") d, err := volume.Get(vd.name) if err != nil { @@ -189,16 +191,18 @@ func (vd *volApi) delete(w http.ResponseWriter, r *http.Request) { return } - err = d.Delete(volumeID) - res := api.ResponseStatusNew(err) - json.NewEncoder(w).Encode(res) + volumeResponse := &api.VolumeResponse{} + if err := d.Delete(volumeID); err != nil { + volumeResponse.Error = err.Error() + } + json.NewEncoder(w).Encode(volumeResponse) } func (vd *volApi) enumerate(w http.ResponseWriter, r *http.Request) { var locator api.VolumeLocator - var configLabels api.Labels + var configLabels 
map[string]string var err error - var vols []api.Volume + var vols []*api.Volume method := "enumerate" @@ -228,9 +232,9 @@ func (vd *volApi) enumerate(w http.ResponseWriter, r *http.Request) { } v = params[string(api.OptVolumeID)] if v != nil { - ids := make([]api.VolumeID, len(v)) + ids := make([]string, len(v)) for i, s := range v { - ids[i] = api.VolumeID(s) + ids[i] = string(s) } vols, err = d.Inspect(ids) if err != nil { @@ -239,7 +243,7 @@ func (vd *volApi) enumerate(w http.ResponseWriter, r *http.Request) { return } } else { - vols, _ = d.Enumerate(locator, configLabels) + vols, _ = d.Enumerate(&locator, configLabels) } json.NewEncoder(w).Encode(vols) } @@ -259,18 +263,22 @@ func (vd *volApi) snap(w http.ResponseWriter, r *http.Request) { return } - vd.logReq(method, string(snapReq.ID)).Info("") + vd.logRequest(method, string(snapReq.Id)).Infoln("") - ID, err := d.Snapshot(snapReq.ID, snapReq.Readonly, snapReq.Locator) - snapRes.VolumeCreateResponse.VolumeResponse = api.VolumeResponse{Error: responseStatus(err)} - snapRes.VolumeCreateResponse.ID = ID + id, err := d.Snapshot(snapReq.Id, snapReq.Readonly, snapReq.Locator) + snapRes.VolumeCreateResponse = &api.VolumeCreateResponse{ + Id: id, + VolumeResponse: &api.VolumeResponse{ + Error: responseStatus(err), + }, + } json.NewEncoder(w).Encode(&snapRes) } func (vd *volApi) snapEnumerate(w http.ResponseWriter, r *http.Request) { var err error - var labels api.Labels - var ids []api.VolumeID + var labels map[string]string + var ids []string method := "snapEnumerate" d, err := volume.Get(vd.name) @@ -289,9 +297,9 @@ func (vd *volApi) snapEnumerate(w http.ResponseWriter, r *http.Request) { v, ok := params[string(api.OptVolumeID)] if v != nil && ok { - ids = make([]api.VolumeID, len(params)) + ids = make([]string, len(params)) for i, s := range v { - ids[i] = api.VolumeID(s) + ids[i] = string(s) } } @@ -306,7 +314,7 @@ func (vd *volApi) snapEnumerate(w http.ResponseWriter, r *http.Request) { } func (vd *volApi) 
stats(w http.ResponseWriter, r *http.Request) { - var volumeID api.VolumeID + var volumeID string var err error method := "stats" @@ -316,7 +324,7 @@ func (vd *volApi) stats(w http.ResponseWriter, r *http.Request) { return } - vd.logReq(method, string(volumeID)).Info("") + vd.logRequest(method, string(volumeID)).Infoln("") d, err := volume.Get(vd.name) if err != nil { @@ -334,7 +342,7 @@ func (vd *volApi) stats(w http.ResponseWriter, r *http.Request) { } func (vd *volApi) alerts(w http.ResponseWriter, r *http.Request) { - var volumeID api.VolumeID + var volumeID string var err error method := "alerts" @@ -360,7 +368,7 @@ func (vd *volApi) alerts(w http.ResponseWriter, r *http.Request) { } func volVersion(route string) string { - return "/" + volApiVersion + "/" + route + return "/" + config.Version + "/" + route } func volPath(route string) string { diff --git a/api/types.go b/api/types.go deleted file mode 100644 index 880799e71..000000000 --- a/api/types.go +++ /dev/null @@ -1,196 +0,0 @@ -package api - -import ( - "time" -) - -// VolumeID driver specific system wide unique volume identifier. -type VolumeID string - -// BadVolumeID invalid volume ID, usually accompanied by an error. -const BadVolumeID = VolumeID("") - -// VolumeCos a number representing class of servcie. -type VolumeCos int - -const ( - // VolumeCosNone minmum level of CoS - VolumeCosNone = VolumeCos(0) - // VolumeCosMedium in-between level of Cos - VolumeCosMedium = VolumeCos(5) - // VolumeCosMax maximum level of CoS - VolumeCosMax = VolumeCos(9) -) - -// VolumeStatus a health status. -type VolumeStatus string - -const ( - // NotPresent This volume is not present. - NotPresent = VolumeStatus("NotPresent") - // Up status healthy - Up = VolumeStatus("Up") - // Down status failure. - Down = VolumeStatus("Down") - // Degraded status up but with degraded performance. 
In a RAID group, this may indicate a problem with one or more drives - Degraded = VolumeStatus("Degraded") -) - -// VolumeState is one of the below enumerations and reflects the state -// of a volume. -type VolumeState int - -const ( - // VolumePending volume is transitioning to new state - VolumePending VolumeState = 1 << iota - // VolumeAvailable volume is ready to be assigned to a container - VolumeAvailable - // VolumeAttached is attached to container - VolumeAttached - // VolumeDetached is detached but associated with a container. - VolumeDetached - // VolumeDetaching is detach is in progress. - VolumeDetaching - // VolumeError is in Error State - VolumeError - // VolumeDeleted is deleted, it will remain in this state while resources are - // asynchronously reclaimed. - VolumeDeleted -) - -// VolumeStateAny a filter that selects all volumes -const VolumeStateAny = VolumePending | VolumeAvailable | VolumeAttached | VolumeDetaching | VolumeDetached | VolumeError | VolumeDeleted - -// Labels a name-value map -type Labels map[string]string - -// VolumeLocator is a structure that is attached to a volume and is used to -// carry opaque metadata. -type VolumeLocator struct { - // Name user friendly identifier - Name string - // VolumeLabels set of name-value pairs that acts as search filters. - VolumeLabels Labels -} - -// CreateOptions are passed in with a CreateRequest -type Source struct { - // Parent if specified will create a clone of Parent. - Parent VolumeID - // Seed will seed the volume from the specified URI. Any - // additional config for the source comes from the labels in the spec. 
- Seed string -} - -// Filesystem supported filesystems -type Filesystem string - -const ( - FsNone Filesystem = "none" - FsExt4 Filesystem = "ext4" - FsXfs Filesystem = "xfs" - FsZfs Filesystem = "zfs" - FsNfs Filesystem = "nfs" -) - -// Strings for VolumeSpec -const ( - SpecEphemeral = "ephemeral" - SpecSize = "size" - SpecFilesystem = "format" - SpecBlockSize = "blocksize" - SpecHaLevel = "ha_level" - SpecCos = "cos" - SpecSnapshotInterval = "snapshot_interval" - SpecDedupe = "dedupe" -) - -// VolumeSpec has the properties needed to create a volume. -type VolumeSpec struct { - // Ephemeral storage - Ephemeral bool - // Thin provisioned volume size in bytes - Size uint64 - // Format disk with this FileSystem - Format Filesystem - // BlockSize for file system - BlockSize int - // HA Level specifies the number of nodes that are - // allowed to fail, and yet data is availabel. - // A value of 0 implies that data is not erasure coded, - // a failure of a node will lead to data loss. - HALevel int - // This disk's CoS - Cos VolumeCos - // Perform dedupe on this disk - Dedupe bool - // SnapshotInterval in minutes, set to 0 to disable Snapshots - SnapshotInterval int - // Volume configuration labels - ConfigLabels Labels -} - -// MachineID is a node instance identifier for clustered systems. -type MachineID string - -const MachineNone MachineID = "" - -// Volume represents a live, created volume. 
-type Volume struct { - // ID Self referential VolumeID - ID VolumeID - // Source - Source *Source - // Readonly - Readonly bool - // Locator User specified locator - Locator VolumeLocator - // Ctime Volume creation time - Ctime time.Time - // Spec User specified VolumeSpec - Spec *VolumeSpec - // Usage Volume usage - Usage uint64 - // LastScan time when an integrity check for run - LastScan time.Time - // Format Filesystem type if any - Format Filesystem - // Status see VolumeStatus - Status VolumeStatus - // State see VolumeState - State VolumeState - // AttachedOn - Node on which this volume is attached. - AttachedOn MachineID - // DevicePath - DevicePath string - // AttachPath - AttachPath string - // ReplicaSet Set of nodes no which this Volume is erasure coded - for clustered storage arrays - ReplicaSet []MachineID - // Error Last recorded error - Error string -} - -// Alerts -type Stats struct { - // Reads completed successfully. - Reads int64 - // ReadMs time spent in reads in ms. - ReadMs int64 - // ReadBytes - ReadBytes int64 - // Writes completed successfully. - Writes int64 - // WriteBytes - WriteBytes int64 - // WriteMs time spent in writes in ms. - WriteMs int64 - // IOProgress I/Os curently in progress. - IOProgress int64 - // IOMs time spent doing I/Os ms. - IOMs int64 -} - -// Alerts -type Alerts struct { -} diff --git a/api/version.go b/api/version.go deleted file mode 100644 index 2d0106696..000000000 --- a/api/version.go +++ /dev/null @@ -1,4 +0,0 @@ -package api - -// Version API version -const Version = "v1" diff --git a/api/volumes.go b/api/volumes.go deleted file mode 100644 index 98ee31290..000000000 --- a/api/volumes.go +++ /dev/null @@ -1,99 +0,0 @@ -package api - -// OptionKey specifies a set of recognized query params -type OptionKey string - -const ( - // OptName query parameter used to lookup volume by name - OptName = OptionKey("Name") - // OptVolumeID query parameter used to lookup volume by ID. 
- OptVolumeID = OptionKey("VolumeID") - // OptLabel query parameter used to lookup volume by set of labels. - OptLabel = OptionKey("Label") - // OptConfigLabel query parameter used to lookup volume by set of labels. - OptConfigLabel = OptionKey("ConfigLabel") -) - -// VolumeCreateRequest is the body of create REST request -type VolumeCreateRequest struct { - // Locator user specified volume name and labels. - Locator VolumeLocator `json:"locator"` - // Source to create volume - Source *Source `json:"source,omitempty"` - // Spec is the storage spec for the volume - Spec *VolumeSpec `json:"spec,omitempty"` -} - -// VolumeCreateResponse is the body of create REST response -type VolumeCreateResponse struct { - // ID of the newly created volume - ID VolumeID `json:"id"` - VolumeResponse -} - -// VolumeActionParam desired action on volume -type VolumeActionParam int - -const ( - // ParamIgnore user should ignore the value of the parameter. - ParamIgnore VolumeActionParam = iota - // ParamOff maps to the boolean value false. - ParamOff - // ParamOn maps to the boolean value true. - ParamOn -) - -// VolumeStateAction is the body of the REST request to specify desired actions -type VolumeStateAction struct { - // Attach or Detach volume - Attach VolumeActionParam `json:"attach"` - // Mount or unmount volume - Mount VolumeActionParam `json:"mount"` - // MountPath - MountPath string `json:"mount_path"` - // DevicePath returned in Attach - DevicePath string `json:"device_path"` -} - -type VolumeSetRequest struct { - // Locator user specified volume name and labels. - Locator *VolumeLocator `json:"locator,omitempty"` - // Spec is the storage spec for the volume - Spec *VolumeSpec `json:"spec,omitempty"` - // Action state modification on this volume. 
- Action *VolumeStateAction `json:"action,omitempty"` -} - -// VolumeSetResponse is the body of the REST response -type VolumeSetResponse struct { - // Volume updated volume - Volume - // VolumeResponse error status - VolumeResponse -} - -// VolumeResponse is embedded in all REST responses. -type VolumeResponse struct { - // Error is "" on success or contains the error message on failure. - Error string `json:"error"` -} - -// SnapCreateRequest request body to create a snap. -type SnapCreateRequest struct { - ID VolumeID `json:"id"` - Locator VolumeLocator `json:"locator"` - Readonly bool `json:"readonly"` -} - -// SnapCreateResponse response body to SnapCreateRequest -type SnapCreateResponse struct { - VolumeCreateResponse -} - -// ResponseStatusNew create VolumeResponse from error -func ResponseStatusNew(err error) VolumeResponse { - if err == nil { - return VolumeResponse{} - } - return VolumeResponse{Error: err.Error()} -} diff --git a/cli/cli_test.go b/cli/cli_test.go new file mode 100644 index 000000000..5c5aa150f --- /dev/null +++ b/cli/cli_test.go @@ -0,0 +1,30 @@ +package cli + +import ( + "testing" + + "github.com/libopenstorage/openstorage/api" + "github.com/stretchr/testify/require" +) + +func TestCmdMarshalProto(t *testing.T) { + volumeSpec := &api.VolumeSpec{ + Size: 64, + Format: api.FSType_FS_TYPE_EXT4, + } + data := cmdMarshalProto(volumeSpec) + require.Equal( + t, + `{ + "ephemeral": false, + "size": "64", + "format": "ext4", + "block_size": "0", + "ha_level": "0", + "cos": 0, + "dedupe": false, + "snapshot_interval": 0 +}`, + data, + ) +} diff --git a/cli/cluster.go b/cli/cluster.go index bffca118c..cd19ede59 100644 --- a/cli/cluster.go +++ b/cli/cluster.go @@ -51,11 +51,11 @@ func (c *clusterClient) status(context *cli.Context) { fmt.Fprintln(w, "ID\t IP\t STATUS\t CPU\t MEMORY\t CONTAINERS") for _, n := range cluster.Nodes { status := "" - if n.Status == api.StatusInit { + if n.Status == api.Status_STATUS_INIT { status = 
"Initializing" - } else if n.Status == api.StatusOk { + } else if n.Status == api.Status_STATUS_OK { status = "OK" - } else if n.Status == api.StatusOffline { + } else if n.Status == api.Status_STATUS_OFFLINE { status = "Off Line" } else { status = "Error" diff --git a/cli/messages.go b/cli/messages.go index ff33386af..cea93ee50 100644 --- a/cli/messages.go +++ b/cli/messages.go @@ -7,8 +7,13 @@ import ( "strings" "github.com/codegangsta/cli" - "github.com/libopenstorage/openstorage/api" + "github.com/libopenstorage/openstorage/pkg/jsonpb" + "github.com/golang/protobuf/proto" +) + +var ( + marshaler = &jsonpb.Marshaler{EnumsAsSimpleStrings: true, Indent: " "} ) // Format standardizes the screen output of commands. @@ -82,6 +87,15 @@ func cmdOutput(c *cli.Context, body interface{}) { fmt.Printf("%+v\n", string(b)) } +func cmdMarshalProto(message proto.Message) string { + s, _ := marshaler.MarshalToString(message) + return s +} + +func cmdOutputProto(message proto.Message) { + fmt.Println(cmdMarshalProto(message)) +} + func fmtOutput(c *cli.Context, format *Format) { jsonOut := c.GlobalBool("json") outFd := os.Stdout diff --git a/cli/volumes.go b/cli/volumes.go index 2e1595ad3..d25a37205 100644 --- a/cli/volumes.go +++ b/cli/volumes.go @@ -33,7 +33,7 @@ type volDriver struct { name string } -func processLabels(s string) (api.Labels, error) { +func processLabels(s string) (map[string]string, error) { m := make(map[string]string) labels := strings.Split(s, ",") for _, v := range labels { @@ -60,9 +60,9 @@ func (v *volDriver) volumeOptions(context *cli.Context) { func (v *volDriver) volumeCreate(context *cli.Context) { var err error - var labels api.Labels - var locator api.VolumeLocator - var id api.VolumeID + var labels map[string]string + locator := &api.VolumeLocator{} + var id string fn := "create" if len(context.Args()) != 1 { @@ -77,17 +77,22 @@ func (v *volDriver) volumeCreate(context *cli.Context) { return } } - locator = 
api.VolumeLocator{ + locator = &api.VolumeLocator{ Name: context.Args()[0], VolumeLabels: labels, } + fsType, err := api.FSTypeSimpleValueOf(context.String("fs")) + if err != nil { + cmdError(context, fn, err) + return + } spec := &api.VolumeSpec{ Size: uint64(VolumeSzUnits(context.Int("s")) * MiB), - Format: api.Filesystem(context.String("fs")), - BlockSize: context.Int("b") * 1024, - HALevel: context.Int("r"), - Cos: api.VolumeCos(context.Int("cos")), - SnapshotInterval: context.Int("si"), + Format: fsType, + BlockSize: int64(context.Int("b") * 1024), + HaLevel: int64(context.Int("r")), + Cos: uint32(context.Int("cos")), + SnapshotInterval: uint32(context.Int("si")), } source := &api.Source{ Seed: context.String("seed"), @@ -116,7 +121,7 @@ func (v *volDriver) volumeMount(context *cli.Context) { return } - err := v.volDriver.Mount(api.VolumeID(volumeID), path) + err := v.volDriver.Mount(string(volumeID), path) if err != nil { cmdError(context, fn, err) return @@ -137,7 +142,7 @@ func (v *volDriver) volumeUnmount(context *cli.Context) { path := context.String("path") - err := v.volDriver.Unmount(api.VolumeID(volumeID), path) + err := v.volDriver.Unmount(string(volumeID), path) if err != nil { cmdError(context, fn, err) return @@ -155,7 +160,7 @@ func (v *volDriver) volumeAttach(context *cli.Context) { v.volumeOptions(context) volumeID := context.Args()[0] - devicePath, err := v.volDriver.Attach(api.VolumeID(volumeID)) + devicePath, err := v.volDriver.Attach(string(volumeID)) if err != nil { cmdError(context, fn, err) return @@ -172,7 +177,7 @@ func (v *volDriver) volumeDetach(context *cli.Context) { } volumeID := context.Args()[0] v.volumeOptions(context) - err := v.volDriver.Detach(api.VolumeID(volumeID)) + err := v.volDriver.Detach(string(volumeID)) if err != nil { cmdError(context, fn, err) return @@ -189,9 +194,9 @@ func (v *volDriver) volumeInspect(context *cli.Context) { return } - d := make([]api.VolumeID, len(context.Args())) + d := make([]string, 
len(context.Args())) for i, v := range context.Args() { - d[i] = api.VolumeID(v) + d[i] = string(v) } volumes, err := v.volDriver.Inspect(d) @@ -200,7 +205,7 @@ func (v *volDriver) volumeInspect(context *cli.Context) { return } - cmdOutput(context, volumes) + cmdOutputVolumes(volumes) } func (v *volDriver) volumeStats(context *cli.Context) { @@ -211,13 +216,13 @@ func (v *volDriver) volumeStats(context *cli.Context) { return } - stats, err := v.volDriver.Stats(api.VolumeID(context.Args()[0])) + stats, err := v.volDriver.Stats(string(context.Args()[0])) if err != nil { cmdError(context, fn, err) return } - cmdOutput(context, stats) + cmdOutputProto(stats) } func (v *volDriver) volumeAlerts(context *cli.Context) { @@ -228,17 +233,17 @@ func (v *volDriver) volumeAlerts(context *cli.Context) { return } - alerts, err := v.volDriver.Alerts(api.VolumeID(context.Args()[0])) + alerts, err := v.volDriver.Alerts(string(context.Args()[0])) if err != nil { cmdError(context, fn, err) return } - cmdOutput(context, alerts) + cmdOutputProto(alerts) } func (v *volDriver) volumeEnumerate(context *cli.Context) { - var locator api.VolumeLocator + locator := &api.VolumeLocator{} var err error fn := "enumerate" @@ -255,9 +260,9 @@ func (v *volDriver) volumeEnumerate(context *cli.Context) { volumes, err := v.volDriver.Enumerate(locator, nil) if err != nil { cmdError(context, fn, err) - return - } - cmdOutput(context, volumes) + return + } + cmdOutputVolumes(volumes) } func (v *volDriver) volumeDelete(context *cli.Context) { @@ -268,7 +273,7 @@ func (v *volDriver) volumeDelete(context *cli.Context) { } volumeID := context.Args()[0] v.volumeOptions(context) - err := v.volDriver.Delete(api.VolumeID(volumeID)) + err := v.volDriver.Delete(volumeID) if err != nil { cmdError(context, fn, err) return @@ -279,14 +284,14 @@ func (v *volDriver) volumeDelete(context *cli.Context) { func (v *volDriver) snapCreate(context *cli.Context) { var err error - var labels api.Labels + var labels 
map[string]string fn := "snapCreate" if len(context.Args()) != 1 { missingParameter(context, fn, "volumeID", "Invalid number of arguments") return } - volumeID := api.VolumeID(context.Args()[0]) + volumeID := context.Args()[0] v.volumeOptions(context) if l := context.String("label"); l != "" { @@ -295,7 +300,7 @@ func (v *volDriver) snapCreate(context *cli.Context) { return } } - locator := api.VolumeLocator{ + locator := &api.VolumeLocator{ Name: context.String("name"), VolumeLabels: labels, } @@ -310,7 +315,7 @@ func (v *volDriver) snapCreate(context *cli.Context) { } func (v *volDriver) snapEnumerate(context *cli.Context) { - var locator api.VolumeLocator + locator := &api.VolumeLocator{} var err error fn := "snap enumerate" @@ -333,7 +338,7 @@ func (v *volDriver) snapEnumerate(context *cli.Context) { cmdError(context, fn, err) return } - cmdOutput(context, snaps) + cmdOutputVolumes(snaps) } // baseVolumeCommand exports commands common to block and file volume drivers. @@ -377,7 +382,7 @@ func baseVolumeCommand(v *volDriver) []cli.Command { }, cli.IntFlag{ Name: "cos", - Usage: "Class of Service [1..9]", + Usage: "Class of Service: [1..9]", Value: 1, }, cli.IntFlag{ @@ -525,3 +530,14 @@ func FileVolumeCommands(name string) []cli.Command { return baseVolumeCommand(v) } + +func cmdOutputVolumes(volumes []*api.Volume) { + fmt.Print("{") + for i, volume := range volumes { + fmt.Print(cmdMarshalProto(volume)) + if i != len(volumes)-1 { + fmt.Print(",") + } + } + fmt.Println("}") +} diff --git a/cluster/cluster.go b/cluster/cluster.go index 26388cd68..43f064d60 100644 --- a/cluster/cluster.go +++ b/cluster/cluster.go @@ -7,20 +7,16 @@ import ( "github.com/fsouza/go-dockerclient" "github.com/libopenstorage/gossip/types" "github.com/libopenstorage/openstorage/api" - + "github.com/libopenstorage/openstorage/config" "github.com/portworx/kvdb" ) var ( - inst Cluster -) + inst *ClusterManager -type Config struct { - ClusterId string - 
NodeId string - MgtIface string - DataIface string -} + errClusterInitialized = errors.New("openstorage.cluster: already initialized") + errClusterNotInitialized = errors.New("openstorage.cluster: not initialized") +) // NodeEntry is used to discover other nodes in the cluster // and setup the gossip protocol with them. @@ -113,15 +109,14 @@ type Cluster interface { // Shutdown can be called when THIS node is gracefully shutting down. Shutdown() error - // Start starts the cluster manager and state machine. - // It also causes this node to join the cluster. - Start() error - ClusterData } -// New instantiates and starts a new cluster manager. -func New(cfg Config, kv kvdb.Kvdb, dockerClient *docker.Client) Cluster { +// Init instantiates a new cluster manager. +func Init(cfg config.ClusterConfig, kv kvdb.Kvdb, dockerClient *docker.Client) error { + if inst != nil { + return errClusterInitialized + } inst = &ClusterManager{ listeners: list.New(), config: cfg, @@ -129,28 +124,24 @@ func New(cfg Config, kv kvdb.Kvdb, dockerClient *docker.Client) Cluster { nodeCache: make(map[string]api.Node), docker: dockerClient, } - return inst + return nil } // Start will run the cluster manager daemon. func Start() error { if inst == nil { - return errors.New("Cluster is not initialized.") + return errClusterNotInitialized } - - err := inst.Start() - if err != nil { - inst = nil + if err := inst.start(); err != nil { return err } - return nil } // Inst returns an instance of an already instantiated cluster manager. 
func Inst() (Cluster, error) { if inst == nil { - return nil, errors.New("Cluster is not initialized.") + return nil, errClusterNotInitialized } return inst, nil } diff --git a/cluster/database.go b/cluster/database.go index d4cc3429c..f26c6f145 100644 --- a/cluster/database.go +++ b/cluster/database.go @@ -5,7 +5,8 @@ import ( "encoding/json" "strings" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/libopenstorage/openstorage/api" "github.com/portworx/kvdb" ) @@ -13,21 +14,23 @@ import ( func readDatabase() (Database, error) { kvdb := kvdb.Instance() - db := Database{Status: api.StatusInit, - NodeEntries: make(map[string]NodeEntry)} + db := Database{ + Status: api.Status_STATUS_INIT, + NodeEntries: make(map[string]NodeEntry), + } kv, err := kvdb.Get("cluster/database") if err != nil && !strings.Contains(err.Error(), "Key not found") { - logrus.Warn("Warning, could not read cluster database") + dlog.Warnln("Warning, could not read cluster database") return db, err } if kv == nil || bytes.Compare(kv.Value, []byte("{}")) == 0 { - logrus.Info("Cluster is uninitialized...") + dlog.Infoln("Cluster is uninitialized...") return db, nil } if err := json.Unmarshal(kv.Value, &db); err != nil { - logrus.Warn("Fatal, Could not parse cluster database ", kv) + dlog.Warnln("Fatal, Could not parse cluster database ", kv) return db, err } @@ -38,15 +41,15 @@ func writeDatabase(db *Database) error { kvdb := kvdb.Instance() b, err := json.Marshal(db) if err != nil { - logrus.Warnf("Fatal, Could not marshal cluster database to JSON: %v", err) + dlog.Warnf("Fatal, Could not marshal cluster database to JSON: %v", err) return err } if _, err := kvdb.Put("cluster/database", b, 0); err != nil { - logrus.Warnf("Fatal, Could not marshal cluster database to JSON: %v", err) + dlog.Warnf("Fatal, Could not marshal cluster database to JSON: %v", err) return err } - logrus.Info("Cluster database updated.") + dlog.Infoln("Cluster database updated.") return 
nil } diff --git a/cluster/manager.go b/cluster/manager.go index bff6d4fb3..3c4f24570 100644 --- a/cluster/manager.go +++ b/cluster/manager.go @@ -10,11 +10,13 @@ import ( "net" "time" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/fsouza/go-dockerclient" "github.com/libopenstorage/gossip" "github.com/libopenstorage/gossip/types" "github.com/libopenstorage/openstorage/api" + "github.com/libopenstorage/openstorage/config" "github.com/portworx/kvdb" "github.com/portworx/systemutils" @@ -26,7 +28,7 @@ const ( type ClusterManager struct { listeners *list.List - config Config + config config.ClusterConfig kv kvdb.Kvdb status api.Status nodeCache map[string]api.Node // Cached info on the nodes in the cluster. @@ -63,7 +65,7 @@ func ifaceToIp(iface *net.Interface) (string, error) { return "", errors.New("Node not connected to the network.") } -func externalIp(config *Config) (string, error) { +func externalIp(config *config.ClusterConfig) (string, error) { if config.MgtIface != "" { iface, err := net.InterfaceByName(config.MgtIface) @@ -103,7 +105,7 @@ func (c *ClusterManager) LocateNode(nodeID string) (api.Node, error) { } func (c *ClusterManager) AddEventListener(listener ClusterListener) error { - logrus.Printf("Adding cluster event listener: %s", listener.String()) + dlog.Printf("Adding cluster event listener: %s", listener.String()) c.listeners.PushBack(listener) return nil } @@ -139,20 +141,20 @@ func (c *ClusterManager) getLatestNodeConfig(nodeId string) *NodeEntry { kvdb := kvdb.Instance() kvlock, err := kvdb.Lock("cluster/lock", 20) if err != nil { - logrus.Warn(" Unable to obtain cluster lock for updating config", err) + dlog.Warnln(" Unable to obtain cluster lock for updating config", err) return nil } defer kvdb.Unlock(kvlock) db, err := readDatabase() if err != nil { - logrus.Warn("Failed to read the database for updating config") + dlog.Warnln("Failed to read the database for 
updating config") return nil } ne, exists := db.NodeEntries[nodeId] if !exists { - logrus.Warn("Could not find info for node with id ", nodeId) + dlog.Warnln("Could not find info for node with id ", nodeId) return nil } @@ -168,9 +170,9 @@ func (c *ClusterManager) initNode(db *Database) (*api.Node, bool) { db.NodeEntries[c.config.NodeId] = NodeEntry{Id: c.selfNode.Id, Ip: c.selfNode.Ip, GenNumber: c.selfNode.GenNumber} - logrus.Infof("Node %s joining cluster...", c.config.NodeId) - logrus.Infof("Cluster ID: %s", c.config.ClusterId) - logrus.Infof("Node IP: %s", c.selfNode.Ip) + dlog.Infof("Node %s joining cluster...", c.config.NodeId) + dlog.Infof("Cluster ID: %s", c.config.ClusterId) + dlog.Infof("Node IP: %s", c.selfNode.Ip) return &c.selfNode, exists } @@ -179,15 +181,15 @@ func (c *ClusterManager) cleanupInit(db *Database, self *api.Node) error { var resErr error var err error - logrus.Infof("Cleanup Init services") + dlog.Infof("Cleanup Init services") for e := c.listeners.Front(); e != nil; e = e.Next() { - logrus.Warnf("Cleanup Init for service %s.", + dlog.Warnf("Cleanup Init for service %s.", e.Value.(ClusterListener).String()) err = e.Value.(ClusterListener).CleanupInit(self, db) if err != nil { - logrus.Warnf("Failed to Cleanup Init %s: %v", + dlog.Warnf("Failed to Cleanup Init %s: %v", e.Value.(ClusterListener).String(), err) resErr = err } @@ -210,7 +212,7 @@ func (c *ClusterManager) joinCluster(db *Database, self *api.Node, exist bool) e for e := c.listeners.Front(); e != nil; e = e.Next() { err = e.Value.(ClusterListener).Init(self, db) if err != nil { - logrus.Warnf("Failed to initialize Init %s: %v", + dlog.Warnf("Failed to initialize Init %s: %v", e.Value.(ClusterListener).String(), err) c.cleanupInit(db, self) goto done @@ -222,7 +224,7 @@ found: for e := c.listeners.Front(); e != nil; e = e.Next() { err = e.Value.(ClusterListener).Join(self, db) if err != nil { - logrus.Warnf("Failed to initialize Join %s: %v", + dlog.Warnf("Failed to initialize 
Join %s: %v", e.Value.(ClusterListener).String(), err) if exist == false { @@ -236,11 +238,11 @@ found: if id != c.config.NodeId { // Check to see if the IP is the same. If it is, then we have a stale entry. if n.Ip == self.Ip { - logrus.Warnf("Warning, Detected node %s with the same IP %s in the database. Will not connect to this node.", + dlog.Warnf("Warning, Detected node %s with the same IP %s in the database. Will not connect to this node.", id, n.Ip) } else { // Gossip with this node. - logrus.Infof("Connecting to node %s with IP %s.", id, n.Ip) + dlog.Infof("Connecting to node %s with IP %s.", id, n.Ip) c.g.AddNode(n.Ip+":9002", types.NodeId(c.config.NodeId)) } } @@ -257,7 +259,7 @@ func (c *ClusterManager) initCluster(db *Database, self *api.Node, exist bool) e for e := c.listeners.Front(); e != nil; e = e.Next() { err = e.Value.(ClusterListener).ClusterInit(self, db) if err != nil { - logrus.Printf("Failed to initialize %s", + dlog.Printf("Failed to initialize %s", e.Value.(ClusterListener).String()) goto done } @@ -265,7 +267,7 @@ func (c *ClusterManager) initCluster(db *Database, self *api.Node, exist bool) e err = c.joinCluster(db, self, exist) if err != nil { - logrus.Printf("Failed to join new cluster") + dlog.Printf("Failed to join new cluster") goto done } @@ -283,7 +285,7 @@ func (c *ClusterManager) heartBeat() { currTime := time.Now() if currTime.Sub(lastUpdateTs) > 10*time.Second { - logrus.Warn("No gossip update for 10 seconds") + dlog.Warnln("No gossip update for 10 seconds") } c.g.UpdateSelf(gossipStoreKey, *node) lastUpdateTs = currTime @@ -302,19 +304,19 @@ func (c *ClusterManager) heartBeat() { if nodeInfo.Value != nil { n, ok = nodeInfo.Value.(api.Node) if !ok { - logrus.Error("Received a bad broadcast packet: %v", nodeInfo.Value) + dlog.Errorln("Received a bad broadcast packet: %v", nodeInfo.Value) continue } } if nodeFoundInCache { - if n.Status != api.StatusOk { - logrus.Warn("Detected node ", n.Id, " to be unhealthy.") + if n.Status != 
api.Status_STATUS_OK { + dlog.Warnln("Detected node ", n.Id, " to be unhealthy.") for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() { err := e.Value.(ClusterListener).Update(&n) if err != nil { - logrus.Warn("Failed to notify ", e.Value.(ClusterListener).String()) + dlog.Warnln("Failed to notify ", e.Value.(ClusterListener).String()) } } @@ -323,32 +325,32 @@ func (c *ClusterManager) heartBeat() { } else if nodeInfo.Status == types.NODE_STATUS_DOWN { ne := c.getLatestNodeConfig(string(id)) if ne != nil && nodeInfo.GenNumber < ne.GenNumber { - logrus.Warn("Detected stale update for node ", id, + dlog.Warnln("Detected stale update for node ", id, " going down, ignoring it") c.g.MarkNodeHasOldGen(id) delete(c.nodeCache, cachedNodeInfo.Id) continue } - logrus.Warn("Detected node ", id, " to be offline due to inactivity.") + dlog.Warnln("Detected node ", id, " to be offline due to inactivity.") - n.Status = api.StatusOffline + n.Status = api.Status_STATUS_OFFLINE for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() { err := e.Value.(ClusterListener).Update(&n) if err != nil { - logrus.Warn("Failed to notify ", e.Value.(ClusterListener).String()) + dlog.Warnln("Failed to notify ", e.Value.(ClusterListener).String()) } } delete(c.nodeCache, cachedNodeInfo.Id) } else if nodeInfo.Status == types.NODE_STATUS_DOWN_WAITING_FOR_NEW_UPDATE { - logrus.Warn("Detected node ", n.Id, " to be offline due to inactivity.") + dlog.Warnln("Detected node ", n.Id, " to be offline due to inactivity.") - n.Status = api.StatusOffline + n.Status = api.Status_STATUS_OFFLINE for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() { err := e.Value.(ClusterListener).Update(&n) if err != nil { - logrus.Warn("Failed to notify ", e.Value.(ClusterListener).String()) + dlog.Warnln("Failed to notify ", e.Value.(ClusterListener).String()) } } @@ -360,13 +362,13 @@ func (c *ClusterManager) heartBeat() { } } else if nodeInfo.Status == types.NODE_STATUS_UP { // A 
node discovered in the cluster. - logrus.Warn("Detected node ", n.Id, " to be in the cluster.") + dlog.Warnln("Detected node ", n.Id, " to be in the cluster.") c.nodeCache[n.Id] = n for e := c.listeners.Front(); e != nil && c.gEnabled; e = e.Next() { err := e.Value.(ClusterListener).Add(&n) if err != nil { - logrus.Warn("Failed to notify ", e.Value.(ClusterListener).String()) + dlog.Warnln("Failed to notify ", e.Value.(ClusterListener).String()) } } } @@ -377,12 +379,12 @@ func (c *ClusterManager) heartBeat() { } func (c *ClusterManager) DisableUpdates() { - logrus.Warn("Disabling gossip updates") + dlog.Warnln("Disabling gossip updates") c.gEnabled = false } func (c *ClusterManager) EnableUpdates() { - logrus.Warn("Enabling gossip updates") + dlog.Warnln("Enabling gossip updates") c.gEnabled = true } @@ -401,14 +403,14 @@ func (c *ClusterManager) GetState() *ClusterState { History: history, NodeStatus: nodes} } -func (c *ClusterManager) Start() error { - logrus.Info("Cluster manager starting...") +func (c *ClusterManager) start() error { + dlog.Infoln("Cluster manager starting...") c.gEnabled = true c.selfNode = api.Node{} c.selfNode.GenNumber = uint64(time.Now().UnixNano()) c.selfNode.Id = c.config.NodeId - c.selfNode.Status = api.StatusOk + c.selfNode.Status = api.Status_STATUS_OK c.selfNode.Ip, _ = externalIp(&c.config) c.selfNode.NodeData = make(map[string]interface{}) c.system = systemutils.New() @@ -423,44 +425,44 @@ func (c *ClusterManager) Start() error { kvdb := kvdb.Instance() kvlock, err := kvdb.Lock("cluster/lock", 60) if err != nil { - logrus.Panic("Fatal, Unable to obtain cluster lock.", err) + dlog.Panicln("Fatal, Unable to obtain cluster lock.", err) } defer kvdb.Unlock(kvlock) db, err := readDatabase() if err != nil { - logrus.Panic(err) + dlog.Panicln(err) } - if db.Status == api.StatusInit { - logrus.Info("Will initialize a new cluster.") + if db.Status == api.Status_STATUS_INIT { + dlog.Infoln("Will initialize a new cluster.") - c.status = 
api.StatusOk - db.Status = api.StatusOk + c.status = api.Status_STATUS_OK + db.Status = api.Status_STATUS_OK self, _ := c.initNode(&db) err = c.initCluster(&db, self, false) if err != nil { - logrus.Error("Failed to initialize the cluster.", err) + dlog.Errorln("Failed to initialize the cluster.", err) return err } // Update the new state of the cluster in the KV Database err := writeDatabase(&db) if err != nil { - logrus.Error("Failed to save the database.", err) + dlog.Errorln("Failed to save the database.", err) return err } - } else if db.Status&api.StatusOk > 0 { - logrus.Info("Cluster state is OK... Joining the cluster.") + } else if db.Status&api.Status_STATUS_OK > 0 { + dlog.Infoln("Cluster state is OK... Joining the cluster.") - c.status = api.StatusOk + c.status = api.Status_STATUS_OK self, exist := c.initNode(&db) err = c.joinCluster(&db, self, exist) if err != nil { - logrus.Error("Failed to join cluster.", err) + dlog.Errorln("Failed to join cluster.", err) return err } @@ -504,15 +506,15 @@ func (c *ClusterManager) Remove(nodes []api.Node) error { func (c *ClusterManager) Shutdown() error { db, err := readDatabase() if err != nil { - logrus.Warnf("Could not read cluster database (%v).", err) + dlog.Warnf("Could not read cluster database (%v).", err) return err } // Alert all listeners that we are shutting this node down. 
for e := c.listeners.Front(); e != nil; e = e.Next() { - logrus.Infof("Shutting down %s", e.Value.(ClusterListener).String()) + dlog.Infof("Shutting down %s", e.Value.(ClusterListener).String()) if err := e.Value.(ClusterListener).Halt(&c.selfNode, &db); err != nil { - logrus.Warnf("Failed to shutdown %s", + dlog.Warnf("Failed to shutdown %s", e.Value.(ClusterListener).String()) } } diff --git a/cmd/osd/main.go b/cmd/osd/main.go index e68ea3cc1..4fa5d5499 100644 --- a/cmd/osd/main.go +++ b/cmd/osd/main.go @@ -6,17 +6,11 @@ import ( "os" "runtime" + "go.pedge.io/dlog" + "github.com/codegangsta/cli" "github.com/docker/docker/pkg/reexec" "github.com/fsouza/go-dockerclient" - - "github.com/portworx/kvdb" - "github.com/portworx/kvdb/consul" - "github.com/portworx/kvdb/etcd" - "github.com/portworx/kvdb/mem" - - "github.com/Sirupsen/logrus" - "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/api/server" osdcli "github.com/libopenstorage/openstorage/cli" @@ -25,14 +19,13 @@ import ( "github.com/libopenstorage/openstorage/graph/drivers" "github.com/libopenstorage/openstorage/volume" "github.com/libopenstorage/openstorage/volume/drivers" -) - -const ( - version = "0.3" + "github.com/portworx/kvdb" + "github.com/portworx/kvdb/consul" + "github.com/portworx/kvdb/etcd" + "github.com/portworx/kvdb/mem" ) func start(c *cli.Context) { - var cm cluster.Cluster if !osdcli.DaemonMode(c) { cli.ShowAppHelp(c) @@ -44,12 +37,12 @@ func start(c *cli.Context) { // We are in daemon mode. file := c.String("file") if file == "" { - logrus.Warn("OSD configuration file not specified. Visit openstorage.org for an example.") + dlog.Warnln("OSD configuration file not specified. 
Visit openstorage.org for an example.") return } cfg, err := config.Parse(file) if err != nil { - logrus.Error(err) + dlog.Errorln(err) return } kvdbURL := c.String("kvdb") @@ -59,62 +52,61 @@ func start(c *cli.Context) { kv, err := kvdb.New(scheme, "openstorage", []string{u.String()}, nil) if err != nil { - logrus.Warnf("Failed to initialize KVDB: %v (%v)", scheme, err) - logrus.Warnf("Supported datastores: %v", datastores) + dlog.Warnf("Failed to initialize KVDB: %v (%v)", scheme, err) + dlog.Warnf("Supported datastores: %v", datastores) return } err = kvdb.SetInstance(kv) if err != nil { - logrus.Warnf("Failed to initialize KVDB: %v", err) + dlog.Warnf("Failed to initialize KVDB: %v", err) return } // Start the cluster state machine, if enabled. if cfg.Osd.ClusterConfig.NodeId != "" && cfg.Osd.ClusterConfig.ClusterId != "" { - logrus.Infof("OSD enabling cluster mode.") + dlog.Infof("OSD enabling cluster mode.") dockerClient, err := docker.NewClientFromEnv() if err != nil { - logrus.Warnf("Failed to initialize docker client: %v", err) + dlog.Warnf("Failed to initialize docker client: %v", err) + return + } + if err := cluster.Init(cfg.Osd.ClusterConfig, kv, dockerClient); err != nil { + dlog.Errorln(err) return } - - cm = cluster.New(cfg.Osd.ClusterConfig, kv, dockerClient) if err := server.StartClusterAPI(config.ClusterAPIBase); err != nil { - logrus.Warnf("Unable to start cluster API server: %v", err) + dlog.Warnf("Unable to start cluster API server: %v", err) return } } // Start the volume drivers.
for d, v := range cfg.Osd.Drivers { - logrus.Infof("Starting volume driver: %v", d) + dlog.Infof("Starting volume driver: %v", d) if _, err := volume.New(d, v); err != nil { - logrus.Warnf("Unable to start volume driver: %v, %v", d, err) + dlog.Warnf("Unable to start volume driver: %v, %v", d, err) return } if err := server.StartPluginAPI(d, config.DriverAPIBase, config.PluginAPIBase); err != nil { - logrus.Warnf("Unable to start volume plugin: %v", err) + dlog.Warnf("Unable to start volume plugin: %v", err) return } } // Start the graph drivers. for d, _ := range cfg.Osd.GraphDrivers { - logrus.Infof("Starting graph driver: %v", d) + dlog.Infof("Starting graph driver: %v", d) if err := server.StartGraphAPI(d, config.PluginAPIBase); err != nil { - logrus.Warnf("Unable to start graph plugin: %v", err) + dlog.Warnf("Unable to start graph plugin: %v", err) return } } - - if cm != nil { - if err := cm.Start(); err != nil { - logrus.Warnf("Unable to start cluster manager: %v", err) - return - } + if err := cluster.Start(); err != nil { + dlog.Warnf("Unable to start cluster manager: %v", err) + return } // Daemon does not exit. @@ -122,7 +118,7 @@ func start(c *cli.Context) { } func showVersion(c *cli.Context) { - fmt.Println("OSD Version:", version) + fmt.Println("OSD Version:", config.Version) fmt.Println("Go Version:", runtime.Version()) fmt.Println("OS:", runtime.GOOS) fmt.Println("Arch:", runtime.GOARCH) @@ -135,7 +131,7 @@ func main() { app := cli.NewApp() app.Name = "osd" app.Usage = "Open Storage CLI" - app.Version = version + app.Version = config.Version app.Flags = []cli.Flag{ cli.BoolFlag{ Name: "json,j", @@ -186,7 +182,8 @@ func main() { // Register all volume drivers with the CLI. 
for _, v := range volumedrivers.AllDrivers { - if v.DriverType&api.Block == api.Block { + // TODO(pedge): was an and, but we have no drivers that have two types + if v.DriverType == api.DriverType_DRIVER_TYPE_BLOCK { bCmds := osdcli.BlockVolumeCommands(v.Name) cmds := append(bCmds) c := cli.Command{ @@ -195,7 +192,8 @@ func main() { Subcommands: cmds, } app.Commands = append(app.Commands, c) - } else if v.DriverType&api.File == api.File { + // TODO(pedge): was an and, but we have no drivers that have two types + } else if v.DriverType == api.DriverType_DRIVER_TYPE_FILE { fCmds := osdcli.FileVolumeCommands(v.Name) cmds := append(fCmds) c := cli.Command{ @@ -209,7 +207,8 @@ func main() { // Register all graph drivers with the CLI. for _, v := range graphdrivers.AllDrivers { - if v.DriverType&api.Graph == api.Graph { + // TODO(pedge): was an and, but we have no drivers that have two types + if v.DriverType == api.DriverType_DRIVER_TYPE_GRAPH { cmds := osdcli.GraphDriverCommands(v.Name) c := cli.Command{ Name: v.Name, diff --git a/config/config.go b/config/config.go index b495376e4..0a4303649 100644 --- a/config/config.go +++ b/config/config.go @@ -7,21 +7,11 @@ import ( "gopkg.in/yaml.v2" - "github.com/libopenstorage/openstorage/cluster" - "github.com/libopenstorage/openstorage/volume" + "go.pedge.io/dlog/logrus" ) -type osd struct { - ClusterConfig cluster.Config `yaml:"cluster"` - Drivers map[string]volume.DriverParams - GraphDrivers map[string]volume.DriverParams -} - -type Config struct { - Osd osd -} - const ( + Version = "v1" PluginAPIBase = "/run/docker/plugins/" DriverAPIBase = "/var/lib/osd/driver/" GraphDriverAPIBase = "/var/lib/osd/graphdriver/" @@ -29,29 +19,42 @@ const ( UrlKey = "url" VersionKey = "version" MountBase = "/var/lib/osd/mounts/" + VolumeBase = "/var/lib/osd/" DataDir = ".data" - Version = "v1" ) -var ( - cfg Config -) +func init() { + os.MkdirAll(MountBase, 0755) + os.MkdirAll(GraphDriverAPIBase, 0755) + // TODO(pedge) 
eventually move to osd main.go when everyone is comfortable with dlog + dlog_logrus.Register() } -func Parse(file string) (*Config, error) { +type ClusterConfig struct { + ClusterId string + NodeId string + MgtIface string + DataIface string +} - b, err := ioutil.ReadFile(file) - if err != nil { - return nil, fmt.Errorf("Unable to read the OSD configuration file (%s): %s", file, err.Error()) +type Config struct { + Osd struct { + ClusterConfig ClusterConfig `yaml:"cluster"` + // map[string]string is volume.VolumeParams equivalent + Drivers map[string]map[string]string + // map[string]string is volume.VolumeParams equivalent + GraphDrivers map[string]map[string]string } +} - err = yaml.Unmarshal(b, &cfg) +func Parse(filePath string) (*Config, error) { + config := &Config{} + data, err := ioutil.ReadFile(filePath) if err != nil { - fmt.Println("Unable to parse OSD configuration: ", err) + return nil, fmt.Errorf("Unable to read the OSD configuration file (%s): %s", filePath, err.Error()) + } + if err := yaml.Unmarshal(data, config); err != nil { return nil, fmt.Errorf("Unable to parse OSD configuration: %s", err.Error()) } - return &cfg, nil -} -func init() { - os.MkdirAll(MountBase, 0755) - os.MkdirAll(GraphDriverAPIBase, 0755) + return config, nil } diff --git a/graph/drivers/chainfs/chainfs.go b/graph/drivers/chainfs/chainfs.go index 2b3b52e29..b8f8814c2 100644 --- a/graph/drivers/chainfs/chainfs.go +++ b/graph/drivers/chainfs/chainfs.go @@ -21,6 +21,8 @@ import ( "os" "path" + "go.pedge.io/dlog" + "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/graph" @@ -29,12 +31,11 @@ import ( "github.com/docker/docker/pkg/chrootarchive" "github.com/docker/docker/pkg/directory" "github.com/docker/docker/pkg/idtools" - - "github.com/Sirupsen/logrus" ) const ( Name = "chainfs" - Type = api.Graph + Type = api.DriverType_DRIVER_TYPE_GRAPH virtPath = "/var/lib/openstorage/chainfs" ) @@ -43,7 +44,7 @@
type Driver struct { } func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - logrus.Infof("Initializing libchainfs at home: %s and storage: %v...", home, virtPath) + dlog.Infof("Initializing libchainfs at home: %s and storage: %v...", home, virtPath) cVirtPath := C.CString(virtPath) go C.start_chainfs(1, cVirtPath) @@ -61,7 +62,7 @@ func (d *Driver) String() string { // held by the driver, e.g., unmounting all layered filesystems // known to this driver. func (d *Driver) Cleanup() error { - logrus.Infof("Stopping libchainfs at %s", virtPath) + dlog.Infof("Stopping libchainfs at %s", virtPath) C.stop_chainfs() return nil } @@ -78,9 +79,9 @@ func (d *Driver) Status() [][2]string { // specified id and parent and mountLabel. Parent and mountLabel may be "". func (d *Driver) Create(id string, parent string, ml string) error { if parent != "" { - logrus.Infof("Creating layer %s with parent %s", id, parent) + dlog.Infof("Creating layer %s with parent %s", id, parent) } else { - logrus.Infof("Creating parent layer %s", id) + dlog.Infof("Creating parent layer %s", id) } cID := C.CString(id) @@ -88,7 +89,7 @@ func (d *Driver) Create(id string, parent string, ml string) error { ret, err := C.create_layer(cID, cParent) if int(ret) != 0 { - logrus.Warnf("Error while creating layer %s", id) + dlog.Warnf("Error while creating layer %s", id) return err } @@ -97,13 +98,12 @@ func (d *Driver) Create(id string, parent string, ml string) error { // Remove attempts to remove the filesystem layer with this id. 
func (d *Driver) Remove(id string) error { - logrus.Infof("Removing layer %s", id) + dlog.Infof("Removing layer %s", id) cID := C.CString(id) - ret, err := C.remove_layer(cID) if int(ret) != 0 { - logrus.Warnf("Error while removing layer %s", id) + dlog.Warnf("Error while removing layer %s", id) return err } @@ -124,12 +124,11 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { ret, err := C.alloc_chainfs(cID) if int(ret) != 0 { - logrus.Warnf("Error while creating a chain FS for %s", id) + dlog.Warnf("Error while creating a chain FS for %s", id) return "", err } else { - logrus.Debugf("Created a chain FS for %s", id) + dlog.Debugf("Created a chain FS for %s", id) chainPath := path.Join(virtPath, id) - return chainPath, err } } @@ -137,7 +136,7 @@ func (d *Driver) Get(id, mountLabel string) (string, error) { // Put releases the system resources for the specified id, // e.g, unmounting layered filesystem. func (d *Driver) Put(id string) error { - logrus.Debugf("Releasing chain FS for %s", id) + dlog.Debugf("Releasing chain FS for %s", id) cID := C.CString(id) _, err := C.release_chainfs(cID) @@ -170,17 +169,17 @@ func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size dir := path.Join(virtPath, id) // dir := path.Join("/tmp/chainfs/", id) - logrus.Infof("Applying diff at path %s\n", dir) + dlog.Infof("Applying diff at path %s\n", dir) if err := chrootarchive.UntarUncompressed(diff, dir, nil); err != nil { - logrus.Warnf("Error while applying diff to %s: %v", id, err) + dlog.Warnf("Error while applying diff to %s: %v", id, err) return 0, err } // show invalid whiteouts warning. 
files, err := ioutil.ReadDir(path.Join(dir, archive.WhiteoutLinkDir)) if err == nil && len(files) > 0 { - logrus.Warnf("Archive contains aufs hardlink references that are not supported.") + dlog.Warnf("Archive contains aufs hardlink references that are not supported.") } return d.DiffSize(id, parent) diff --git a/graph/drivers/chainfs/unsupported.go b/graph/drivers/chainfs/unsupported.go index 3bbc4718d..9ba031513 100644 --- a/graph/drivers/chainfs/unsupported.go +++ b/graph/drivers/chainfs/unsupported.go @@ -12,7 +12,7 @@ import ( const ( Name = "chainfs" - Type = api.Graph + Type = api.DriverType_DRIVER_TYPE_GRAPH ) var ( diff --git a/graph/drivers/layer0/layer0.go b/graph/drivers/layer0/layer0.go index 6d4293131..359150222 100644 --- a/graph/drivers/layer0/layer0.go +++ b/graph/drivers/layer0/layer0.go @@ -8,13 +8,13 @@ import ( "sync" "sync/atomic" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/overlay" "github.com/docker/docker/pkg/archive" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/pkg/parsers" - "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/graph" "github.com/libopenstorage/openstorage/volume" @@ -35,7 +35,7 @@ type Layer0Vol struct { // path where the external volume is mounted. path string // volumeID mapping to this external volume - volumeID api.VolumeID + volumeID string // ref keeps track of mount and unmounts. ref int32 } @@ -55,7 +55,7 @@ type Layer0 struct { // Layer0Graphdriver options. 
This should be passed in as a st const ( Name = "layer0" - Type = api.Graph + Type = api.DriverType_DRIVER_TYPE_GRAPH Layer0VolumeDriver = "layer0.volume_driver" ) @@ -77,7 +77,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, fmt.Errorf("Unknown option %s\n", key) } } - logrus.Infof("Layer0 volume driver: %v", volumeDriver) + dlog.Infof("Layer0 volume driver: %v", volumeDriver) volDriver, err := volume.Get(volumeDriver) if err != nil { return nil, err @@ -147,17 +147,17 @@ func (l *Layer0) create(id, parent string) (string, *Layer0Vol, error) { vol, ok := l.volumes[id] if !ok { - logrus.Warnf("Failed to find layer0 volume for id %v", id) + dlog.Warnf("Failed to find layer0 volume for id %v", id) return id, nil, nil } // Query volume for Layer 0 - vols, err := l.volDriver.Enumerate(api.VolumeLocator{Name: vol.parent}, nil) + vols, err := l.volDriver.Enumerate(&api.VolumeLocator{Name: vol.parent}, nil) // If we don't find a volume configured for this image, // then don't track layer0 if err != nil || vols == nil { - logrus.Infof("Failed to find configured volume for id %v", vol.parent) + dlog.Infof("Failed to find configured volume for id %v", vol.parent) delete(l.volumes, id) return id, nil, nil } @@ -171,7 +171,7 @@ func (l *Layer0) create(id, parent string) (string, *Layer0Vol, error) { } } if index == -1 { - logrus.Infof("Failed to find free volume for id %v", vol.parent) + dlog.Infof("Failed to find free volume for id %v", vol.parent) delete(l.volumes, id) return id, nil, nil } @@ -180,23 +180,23 @@ func (l *Layer0) create(id, parent string) (string, *Layer0Vol, error) { os.MkdirAll(mountPath, 0755) // If this is a block driver, first attach the volume. 
- if l.volDriver.Type()&api.Block != 0 { - _, err := l.volDriver.Attach(vols[index].ID) + if l.volDriver.Type() == api.DriverType_DRIVER_TYPE_BLOCK { + _, err := l.volDriver.Attach(vols[index].Id) if err != nil { - logrus.Errorf("Failed to attach volume %v", vols[index].ID) + dlog.Errorf("Failed to attach volume %v", vols[index].Id) delete(l.volumes, id) return id, nil, nil } } - err = l.volDriver.Mount(vols[index].ID, mountPath) + err = l.volDriver.Mount(vols[index].Id, mountPath) if err != nil { - logrus.Errorf("Failed to mount volume %v at path %v", - vols[index].ID, mountPath) + dlog.Errorf("Failed to mount volume %v at path %v", + vols[index].Id, mountPath) delete(l.volumes, id) return id, nil, nil } vol.path = mountPath - vol.volumeID = vols[index].ID + vol.volumeID = vols[index].Id vol.ref = 1 return l.realID(id), vol, nil @@ -239,18 +239,18 @@ func (l *Layer0) Remove(id string) error { upperDir := path.Join(path.Join(l.home, l.realID(id)), "upper") err := os.Rename(upperDir, path.Join(v.path, "upper")) if err != nil { - logrus.Warnf("Failed in rename(%v): %v", id, err) + dlog.Warnf("Failed in rename(%v): %v", id, err) } l.Driver.Remove(l.realID(id)) err = l.volDriver.Unmount(v.volumeID, v.path) - if l.volDriver.Type()&api.Block != 0 { + if l.volDriver.Type() == api.DriverType_DRIVER_TYPE_BLOCK { _ = l.volDriver.Detach(v.volumeID) } err = os.RemoveAll(v.path) delete(l.volumes, v.id) } } else { - logrus.Warnf("Failed to find layer0 vol for id %v", id) + dlog.Warnf("Failed to find layer0 vol for id %v", id) } return err } diff --git a/graph/drivers/proxy/proxy.go b/graph/drivers/proxy/proxy.go index f3f6c8586..37da47b51 100644 --- a/graph/drivers/proxy/proxy.go +++ b/graph/drivers/proxy/proxy.go @@ -8,9 +8,9 @@ import ( const ( Name = "proxy" - Type = api.Graph + Type = api.DriverType_DRIVER_TYPE_GRAPH ) func init() { - graph.Register("proxy", overlay.Init) + graph.Register(Name, overlay.Init) } diff --git a/pkg/jsonpb/LICENSE b/pkg/jsonpb/LICENSE new file 
mode 100644 index 000000000..1b1b1921e --- /dev/null +++ b/pkg/jsonpb/LICENSE @@ -0,0 +1,31 @@ +Go support for Protocol Buffers - Google's data interchange format + +Copyright 2010 The Go Authors. All rights reserved. +https://github.com/golang/protobuf + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/pkg/jsonpb/jsonpb.diff b/pkg/jsonpb/jsonpb.diff new file mode 100644 index 000000000..7071b82e0 --- /dev/null +++ b/pkg/jsonpb/jsonpb.diff @@ -0,0 +1,37 @@ +51,52d50 +< "go.pedge.io/pb/go/google/protobuf" +< +65,66d62 +< // Whether to render enum values as simple strings, as opposed to string values. +< EnumsAsSimpleStrings bool +99,105d94 +< if v != nil { +< if timestamp, ok := v.(*google_protobuf.Timestamp); ok { +< out.write(`"`) +< out.write(timestamp.GoTime().String()) +< out.write(`"`) +< } +< } +216,219d204 +< type simpleStringer interface { +< SimpleString() string +< } +< +252,253c237 +< if (!m.EnumsAsInts || m.EnumsAsSimpleStrings) && prop.Enum != "" { +< var enumStr string +--- +> if !m.EnumsAsInts && prop.Enum != "" { +257,266c241 +< if m.EnumsAsSimpleStrings { +< obj, ok := v.Interface().(simpleStringer) +< if ok { +< enumStr = obj.SimpleString() +< } else { +< enumStr = v.Interface().(fmt.Stringer).String() +< } +< } else { +< enumStr = v.Interface().(fmt.Stringer).String() +< } +--- +> enumStr := v.Interface().(fmt.Stringer).String() diff --git a/pkg/jsonpb/jsonpb.go b/pkg/jsonpb/jsonpb.go new file mode 100644 index 000000000..9896a0389 --- /dev/null +++ b/pkg/jsonpb/jsonpb.go @@ -0,0 +1,549 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2015 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package jsonpb provides marshaling and unmarshaling between protocol buffers and JSON. +It follows the specification at https://developers.google.com/protocol-buffers/docs/proto3#json. + +This package produces a different output than the standard "encoding/json" package, +which does not operate correctly on protocol buffers. +*/ +package jsonpb + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "reflect" + "sort" + "strconv" + "strings" + + "go.pedge.io/pb/go/google/protobuf" + + "github.com/golang/protobuf/proto" +) + +var ( + byteArrayType = reflect.TypeOf([]byte{}) +) + +// Marshaler is a configurable object for converting between +// protocol buffer objects and a JSON representation for them +type Marshaler struct { + // Whether to render enum values as integers, as opposed to string values. + EnumsAsInts bool + // Whether to render enum values as simple strings, as opposed to string values. + EnumsAsSimpleStrings bool + + // A string to indent each level by. 
The presence of this field will + // also cause a space to appear between the field separator and + // value, and for newlines to be appear between fields and array + // elements. + Indent string +} + +// Marshal marshals a protocol buffer into JSON. +func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { + writer := &errWriter{writer: out} + return m.marshalObject(writer, pb, "") +} + +// MarshalToString converts a protocol buffer object to JSON string. +func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { + var buf bytes.Buffer + if err := m.Marshal(&buf, pb); err != nil { + return "", err + } + return buf.String(), nil +} + +type int32Slice []int32 + +// For sorting extensions ids to ensure stable output. +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// marshalObject writes a struct to the Writer. +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string) error { + if v != nil { + if timestamp, ok := v.(*google_protobuf.Timestamp); ok { + out.write(`"`) + out.write(timestamp.GoTime().String()) + out.write(`"`) + } + } + out.write("{") + if m.Indent != "" { + out.write("\n") + } + + s := reflect.ValueOf(v).Elem() + firstField := true + for i := 0; i < s.NumField(); i++ { + value := s.Field(i) + valueField := s.Type().Field(i) + if strings.HasPrefix(valueField.Name, "XXX_") { + continue + } + + // TODO: proto3 objects should have default values omitted. + + // IsNil will panic on most value kinds. + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } + } + + // Oneof fields need special handling. + if valueField.Tag.Get("protobuf_oneof") != "" { + // value is an interface containing &T{real_value}. 
+ sv := value.Elem().Elem() // interface -> *T -> T + value = sv.Field(0) + valueField = sv.Type().Field(0) + } + prop := jsonProperties(valueField) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, prop, value, indent); err != nil { + return err + } + firstField = false + } + + // Handle proto2 extensions. + if ep, ok := v.(extendableProto); ok { + extensions := proto.RegisteredExtensions(v) + extensionMap := ep.ExtensionMap() + // Sort extensions for stable output. + ids := make([]int32, 0, len(extensionMap)) + for id := range extensionMap { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + for _, id := range ids { + desc := extensions[id] + if desc == nil { + // unknown extension + continue + } + ext, extErr := proto.GetExtension(ep, desc) + if extErr != nil { + return extErr + } + value := reflect.ValueOf(ext) + var prop proto.Properties + prop.Parse(desc.Tag) + prop.OrigName = fmt.Sprintf("[%s]", desc.Name) + if !firstField { + m.writeSep(out) + } + if err := m.marshalField(out, &prop, value, indent); err != nil { + return err + } + firstField = false + } + + } + + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err +} + +func (m *Marshaler) writeSep(out *errWriter) { + if m.Indent != "" { + out.write(",\n") + } else { + out.write(",") + } +} + +// marshalField writes field description and value to the Writer. +func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"`) + out.write(prop.OrigName) + out.write(`":`) + if m.Indent != "" { + out.write(" ") + } + if err := m.marshalValue(out, prop, v, indent); err != nil { + return err + } + return nil +} + +type simpleStringer interface { + SimpleString() string +} + +// marshalValue writes the value to the Writer. 
+func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { + + var err error + v = reflect.Indirect(v) + + // Handle repeated elements. + if v.Type() != byteArrayType && v.Kind() == reflect.Slice { + out.write("[") + comma := "" + for i := 0; i < v.Len(); i++ { + sliceVal := v.Index(i) + out.write(comma) + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + m.marshalValue(out, prop, sliceVal, indent+m.Indent) + comma = "," + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write("]") + return out.err + } + + // Handle enumerations. + if (!m.EnumsAsInts || m.EnumsAsSimpleStrings) && prop.Enum != "" { + var enumStr string + // Unknown enum values will are stringified by the proto library as their + // value. Such values should _not_ be quoted or they will be interpreted + // as an enum string instead of their value. + if m.EnumsAsSimpleStrings { + obj, ok := v.Interface().(simpleStringer) + if ok { + enumStr = obj.SimpleString() + } else { + enumStr = v.Interface().(fmt.Stringer).String() + } + } else { + enumStr = v.Interface().(fmt.Stringer).String() + } + var valStr string + if v.Kind() == reflect.Ptr { + valStr = strconv.Itoa(int(v.Elem().Int())) + } else { + valStr = strconv.Itoa(int(v.Int())) + } + isKnownEnum := enumStr != valStr + if isKnownEnum { + out.write(`"`) + } + out.write(enumStr) + if isKnownEnum { + out.write(`"`) + } + return out.err + } + + // Handle nested messages. + if v.Kind() == reflect.Struct { + return m.marshalObject(out, v.Addr().Interface().(proto.Message), indent+m.Indent) + } + + // Handle maps. + // Since Go randomizes map iteration, we sort keys for stable output. 
+ if v.Kind() == reflect.Map { + out.write(`{`) + keys := v.MapKeys() + sort.Sort(mapKeys(keys)) + for i, k := range keys { + if i > 0 { + out.write(`,`) + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + out.write(m.Indent) + } + + b, err := json.Marshal(k.Interface()) + if err != nil { + return err + } + s := string(b) + + // If the JSON is not a string value, encode it again to make it one. + if !strings.HasPrefix(s, `"`) { + b, err := json.Marshal(s) + if err != nil { + return err + } + s = string(b) + } + + out.write(s) + out.write(`:`) + if m.Indent != "" { + out.write(` `) + } + + if err := m.marshalValue(out, prop, v.MapIndex(k), indent+m.Indent); err != nil { + return err + } + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + out.write(m.Indent) + } + out.write(`}`) + return out.err + } + + // Default handling defers to the encoding/json library. + b, err := json.Marshal(v.Interface()) + if err != nil { + return err + } + needToQuote := string(b[0]) != `"` && (v.Kind() == reflect.Int64 || v.Kind() == reflect.Uint64) + if needToQuote { + out.write(`"`) + } + out.write(string(b)) + if needToQuote { + out.write(`"`) + } + return out.err +} + +// Unmarshal unmarshals a JSON object stream into a protocol +// buffer. This function is lenient and will decode any options +// permutations of the related Marshaler. +func Unmarshal(r io.Reader, pb proto.Message) error { + inputValue := json.RawMessage{} + if err := json.NewDecoder(r).Decode(&inputValue); err != nil { + return err + } + return unmarshalValue(reflect.ValueOf(pb).Elem(), inputValue) +} + +// UnmarshalString will populate the fields of a protocol buffer based +// on a JSON string. This function is lenient and will decode any options +// permutations of the related Marshaler. +func UnmarshalString(str string, pb proto.Message) error { + return Unmarshal(strings.NewReader(str), pb) +} + +// unmarshalValue converts/copies a value into the target. 
+func unmarshalValue(target reflect.Value, inputValue json.RawMessage) error { + targetType := target.Type() + + // Allocate memory for pointer fields. + if targetType.Kind() == reflect.Ptr { + target.Set(reflect.New(targetType.Elem())) + return unmarshalValue(target.Elem(), inputValue) + } + + // Handle nested messages. + if targetType.Kind() == reflect.Struct { + var jsonFields map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + sprops := proto.GetProperties(targetType) + for i := 0; i < target.NumField(); i++ { + ft := target.Type().Field(i) + if strings.HasPrefix(ft.Name, "XXX_") { + continue + } + fieldName := jsonProperties(ft).OrigName + + valueForField, ok := jsonFields[fieldName] + if !ok { + continue + } + delete(jsonFields, fieldName) + + // Handle enums, which have an underlying type of int32, + // and may appear as strings. We do this while handling + // the struct so we have access to the enum info. + // The case of an enum appearing as a number is handled + // by the recursive call to unmarshalValue. + if enum := sprops.Prop[i].Enum; valueForField[0] == '"' && enum != "" { + vmap := proto.EnumValueMap(enum) + // Don't need to do unquoting; valid enum names + // are from a limited character set. + s := valueForField[1 : len(valueForField)-1] + n, ok := vmap[string(s)] + if !ok { + return fmt.Errorf("unknown value %q for enum %s", s, enum) + } + f := target.Field(i) + if f.Kind() == reflect.Ptr { // proto2 + f.Set(reflect.New(f.Type().Elem())) + f = f.Elem() + } + f.SetInt(int64(n)) + continue + } + + if err := unmarshalValue(target.Field(i), valueForField); err != nil { + return err + } + } + // Check for any oneof fields. 
+ for fname, raw := range jsonFields { + if oop, ok := sprops.OneofTypes[fname]; ok { + nv := reflect.New(oop.Type.Elem()) + target.Field(oop.Field).Set(nv) + if err := unmarshalValue(nv.Elem().Field(0), raw); err != nil { + return err + } + delete(jsonFields, fname) + } + } + if len(jsonFields) > 0 { + // Pick any field to be the scapegoat. + var f string + for fname := range jsonFields { + f = fname + break + } + return fmt.Errorf("unknown field %q in %v", f, targetType) + } + return nil + } + + // Handle arrays (which aren't encoded bytes) + if targetType != byteArrayType && targetType.Kind() == reflect.Slice { + var slc []json.RawMessage + if err := json.Unmarshal(inputValue, &slc); err != nil { + return err + } + len := len(slc) + target.Set(reflect.MakeSlice(targetType, len, len)) + for i := 0; i < len; i++ { + if err := unmarshalValue(target.Index(i), slc[i]); err != nil { + return err + } + } + return nil + } + + // Handle maps (whose keys are always strings) + if targetType.Kind() == reflect.Map { + var mp map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &mp); err != nil { + return err + } + target.Set(reflect.MakeMap(targetType)) + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + if err := unmarshalValue(k, json.RawMessage(ks)); err != nil { + return err + } + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + if err := unmarshalValue(v, raw); err != nil { + return err + } + target.SetMapIndex(k, v) + } + return nil + } + + // 64-bit integers can be encoded as strings. In this case we drop + // the quotes and proceed as normal. 
+ isNum := targetType.Kind() == reflect.Int64 || targetType.Kind() == reflect.Uint64 + if isNum && strings.HasPrefix(string(inputValue), `"`) { + inputValue = inputValue[1 : len(inputValue)-1] + } + + // Use the encoding/json for parsing other value types. + return json.Unmarshal(inputValue, target.Addr().Interface()) +} + +// jsonProperties returns parsed proto.Properties for the field. +func jsonProperties(f reflect.StructField) *proto.Properties { + var prop proto.Properties + prop.Init(f.Type, f.Name, f.Tag.Get("protobuf"), &f) + return &prop +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + proto.Message + ExtensionRangeArray() []proto.ExtensionRange + ExtensionMap() map[int32]proto.Extension +} + +// Writer wrapper inspired by https://blog.golang.org/errors-are-values +type errWriter struct { + writer io.Writer + err error +} + +func (w *errWriter) write(str string) { + if w.err != nil { + return + } + _, w.err = w.writer.Write([]byte(str)) +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. +type mapKeys []reflect.Value + +func (s mapKeys) Len() int { return len(s) } +func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s mapKeys) Less(i, j int) bool { + return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface()) +} diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go index ef4e06aa0..e2abe7df2 100644 --- a/pkg/mount/mount.go +++ b/pkg/mount/mount.go @@ -8,7 +8,7 @@ import ( "sync" "syscall" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" ) // Mangager defines the interface for keep track of volume driver mounts. 
@@ -133,7 +133,7 @@ func (m *Mounter) Mount(minor int, device, path, fs string, flags uintptr, data dev, ok := m.paths[path] if ok && dev != device { - logrus.Warnf("cannot mount %q, device %q is mounted at %q", device, dev, path) + dlog.Warnf("cannot mount %q, device %q is mounted at %q", device, dev, path) return ErrExist } info, ok := m.mounts[device] @@ -148,7 +148,7 @@ func (m *Mounter) Mount(minor int, device, path, fs string, flags uintptr, data // Validate input params if fs != info.Fs { - logrus.Warnf("%s Existing mountpoint has fs %q cannot change to %q", + dlog.Warnf("%s Existing mountpoint has fs %q cannot change to %q", device, info.Fs, fs) return ErrEinval } @@ -194,7 +194,7 @@ func (m *Mounter) Unmount(device, path string) error { if _, pathExists := m.paths[path]; pathExists { delete(m.paths, path) } else { - logrus.Warnf("Path %q for device %q does not exist in pathMap", path, device) + dlog.Warnf("Path %q for device %q does not exist in pathMap", path, device) } // Blow away this mountpoint. info.Mountpoint[i] = info.Mountpoint[len(info.Mountpoint)-1] diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go index a52743bef..be8b517b3 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/error.go @@ -42,6 +42,23 @@ type Error interface { OrigErr() error } +// BatchError is a batch of errors which also wraps lower level errors with code, message, +// and original errors. Calling Error() will only return the error that is at the end +// of the list. +type BatchError interface { + // Satisfy the generic error interface. + error + + // Returns the short phrase depicting the classification of the error. + Code() string + + // Returns the error details message. + Message() string + + // Returns the original errors if set. An empty slice is returned if not set.
+ OrigErrs() []error +} + // New returns an Error object described by the code, message, and origErr. // // If origErr satisfies the Error interface it will not be wrapped within a new @@ -53,6 +70,11 @@ func New(code, message string, origErr error) Error { return newBaseError(code, message, origErr) } +// NewBatchError returns an baseError with an expectation of an array of errors +func NewBatchError(code, message string, errs []error) BatchError { + return newBaseErrors(code, message, errs) +} + // A RequestFailure is an interface to extract request failure information from // an Error such as the request ID of the failed request returned by a service. // RequestFailures may not always have a requestID value if the request failed diff --git a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go index 003a6e806..605f73c5d 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/awserr/types.go @@ -31,7 +31,7 @@ type baseError struct { // Optional original error this error is based off of. Allows building // chained errors. - origErr error + errs []error } // newBaseError returns an error object for the code, message, and err. @@ -43,11 +43,34 @@ type baseError struct { // // origErr is the error object which will be nested under the new error to be returned. func newBaseError(code, message string, origErr error) *baseError { - return &baseError{ + b := &baseError{ code: code, message: message, - origErr: origErr, } + + if origErr != nil { + b.errs = append(b.errs, origErr) + } + + return b +} + +// newBaseErrors returns an error object for the code, message, and errors. +// +// code is a short no whitespace phrase depicting the classification of +// the error that is being created. +// +// message is the free flow string containing detailed information about the error. 
+// +// origErrs is the error objects which will be nested under the new errors to be returned. +func newBaseErrors(code, message string, origErrs []error) *baseError { + b := &baseError{ + code: code, + message: message, + errs: origErrs, + } + + return b } // Error returns the string representation of the error. @@ -56,7 +79,12 @@ func newBaseError(code, message string, origErr error) *baseError { // // Satisfies the error interface. func (b baseError) Error() string { - return SprintError(b.code, b.message, "", b.origErr) + size := len(b.errs) + if size > 0 { + return SprintError(b.code, b.message, "", errorList(b.errs)) + } + + return SprintError(b.code, b.message, "", nil) } // String returns the string representation of the error. @@ -76,9 +104,20 @@ func (b baseError) Message() string { } // OrigErr returns the original error if one was set. Nil is returned if no error -// was set. +// was set. This only returns the first element in the list. If the full list is +// needed, use BatchError func (b baseError) OrigErr() error { + if size := len(b.errs); size > 0 { + return b.errs[0] + } + + return nil +} + +// OrigErrs returns the original errors if one was set. An empty slice is returned if +// no error was set. +func (b baseError) OrigErrs() []error { + return b.errs } // So that the Error interface type can be included as an anonymous field @@ -133,3 +172,26 @@ func (r requestError) StatusCode() int { func (r requestError) RequestID() string { return r.requestID } + +// An error list that satisfies the golang interface +type errorList []error + +// Error returns the string representation of the error. +// +// Satisfies the error interface. +func (e errorList) Error() string { + msg := "" + // How do we want to handle the array size being zero + if size := len(e); size > 0 { + for i := 0; i < size; i++ { + msg += fmt.Sprintf("%s", e[i].Error()) + // We check the next index to see if it is within the slice.
+ // If it is, then we append a newline. We do this, because unit tests + could be broken with the additional '\n' + if i+1 < size { + msg += "\n" + } + } + } + return msg +} diff --git a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go index e639ce040..c8d0564d8 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/client/client.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/client/client.go @@ -108,7 +108,7 @@ const logRespMsg = `DEBUG: Response %s/%s Details: -----------------------------------------------------` func logResponse(r *request.Request) { - var msg = "no reponse data" + var msg = "no response data" if r.HTTPResponse != nil { logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/config.go b/vendor/github.com/aws/aws-sdk-go/aws/config.go index 75fcc8284..9e83e9260 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/config.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/config.go @@ -18,6 +18,11 @@ type RequestRetryer interface{} // A Config provides service configuration for service clients. By default, // all clients will use the {defaults.DefaultConfig} structure. type Config struct { + // Enables verbose error printing of all credential chain errors. + // Should be used when wanting to see all errors while attempting to retrieve + // credentials. + CredentialsChainVerboseErrors *bool + // The credentials object to use when signing requests. Defaults to // a chain of credential providers to search for credentials in environment // variables, shared credential file, and EC2 Instance Roles. @@ -95,6 +100,20 @@ type Config struct { // Amazon S3: Virtual Hosting of Buckets S3ForcePathStyle *bool + // Set this to `true` to disable the EC2Metadata client from overriding the + default http.Client's Timeout.
This is helpful if you do not want the EC2Metadata + client to create a new http.Client. This option is only meaningful if you're not + already using a custom HTTP client with the SDK. Enabled by default. + // + // Must be set and provided to the session.New() in order to disable the EC2Metadata + // overriding the timeout for default credentials chain. + // + // Example: + // sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true)) + // svc := s3.New(sess) + // + EC2MetadataDisableTimeoutOverride *bool + SleepDelay func(time.Duration) } @@ -107,6 +126,13 @@ func NewConfig() *Config { return &Config{} } +// WithCredentialsChainVerboseErrors sets a config verbose errors boolean and returning +// a Config pointer. +func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config { + c.CredentialsChainVerboseErrors = &verboseErrs + return c +} + // WithCredentials sets a config Credentials value returning a Config pointer // for chaining. func (c *Config) WithCredentials(creds *credentials.Credentials) *Config { @@ -184,6 +210,13 @@ func (c *Config) WithS3ForcePathStyle(force bool) *Config { return c } +// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value +// returning a Config pointer for chaining. +func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { + c.EC2MetadataDisableTimeoutOverride = &enable + return c +} + // WithSleepDelay overrides the function used to sleep while waiting for the // next retry. Defaults to time.Sleep.
func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { @@ -203,6 +236,10 @@ func mergeInConfig(dst *Config, other *Config) { return } + if other.CredentialsChainVerboseErrors != nil { + dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors + } + if other.Credentials != nil { dst.Credentials = other.Credentials } @@ -251,6 +288,10 @@ func mergeInConfig(dst *Config, other *Config) { dst.S3ForcePathStyle = other.S3ForcePathStyle } + if other.EC2MetadataDisableTimeoutOverride != nil { + dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride + } + if other.SleepDelay != nil { dst.SleepDelay = other.SleepDelay } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go index 115b40739..857311f64 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go @@ -8,8 +8,14 @@ var ( // ErrNoValidProvidersFoundInChain Is returned when there are no valid // providers in the ChainProvider. // + // This has been deprecated. For verbose error messaging set + // aws.Config.CredentialsChainVerboseErrors to true + // // @readonly - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", "no valid providers in chain", nil) + ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", + `no valid providers in chain. Deprecated. 
+ For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, + nil) ) // A ChainProvider will search for a provider which returns credentials @@ -45,8 +51,9 @@ var ( // svc := ec2.New(&aws.Config{Credentials: creds}) // type ChainProvider struct { - Providers []Provider - curr Provider + Providers []Provider + curr Provider + VerboseErrors bool } // NewChainCredentials returns a pointer to a new Credentials object @@ -63,17 +70,23 @@ func NewChainCredentials(providers []Provider) *Credentials { // If a provider is found it will be cached and any calls to IsExpired() // will return the expired state of the cached provider. func (c *ChainProvider) Retrieve() (Value, error) { + var errs []error for _, p := range c.Providers { - if creds, err := p.Retrieve(); err == nil { + creds, err := p.Retrieve() + if err == nil { c.curr = p return creds, nil } + errs = append(errs, err) } c.curr = nil - // TODO better error reporting. maybe report error for each failed retrieve? - - return Value{}, ErrNoValidProvidersFoundInChain + var err error + err = ErrNoValidProvidersFoundInChain + if c.VerboseErrors { + err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) + } + return Value{}, err } // IsExpired will returned the expired state of the currently cached provider diff --git a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go index 50f831c8f..42c883aa9 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/defaults/defaults.go @@ -83,13 +83,14 @@ func Handlers() request.Handlers { func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) - return credentials.NewChainCredentials( - []credentials.Provider{ + return 
credentials.NewCredentials(&credentials.ChainProvider{ + VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), + Providers: []credentials.Provider{ &credentials.EnvProvider{}, &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, &ec2rolecreds.EC2RoleProvider{ Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), ExpiryWindow: 5 * time.Minute, }, - }) + }}) } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go index f0dc331e0..9e16c1cf7 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go @@ -4,7 +4,6 @@ package ec2metadata import ( "io/ioutil" - "net" "net/http" "time" @@ -26,6 +25,7 @@ type EC2Metadata struct { // New creates a new instance of the EC2Metadata client with a session. // This client is safe to use across multiple goroutines. // +// // Example: // // Create a EC2Metadata client from just a session. // svc := ec2metadata.New(mySession) @@ -40,22 +40,19 @@ func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { // NewClient returns a new EC2Metadata client. Should be used to create // a client when not using a session. Generally using just New with a session // is preferred. +// +// If an unmodified HTTP client is provided from the stdlib default, or no client +// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. +// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - // If the default http client is provided, replace it with a custom - // client using default timeouts. 
- if cfg.HTTPClient == http.DefaultClient { + if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { + // If the http client is unmodified and this feature is not disabled + // set custom timeouts for EC2Metadata requests. cfg.HTTPClient = &http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. - Timeout: 5 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - }, + // use a shorter timeout than default because the metadata + // service is local if it is running, and to fail faster + // if not running on an ec2 instance. + Timeout: 5 * time.Second, } } @@ -84,6 +81,10 @@ func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegio return svc } +func httpClientZero(c *http.Client) bool { + return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) +} + type metadataOutput struct { Content string } diff --git a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go index 6a0f371a2..47e4536ff 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/session/session.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/session/session.go @@ -51,12 +51,21 @@ type Session struct { // sess := session.New() // svc := s3.New(sess) func New(cfgs ...*aws.Config) *Session { - def := defaults.Get() + cfg := defaults.Config() + handlers := defaults.Handlers() + + // Apply the passed in configs so the configuration can be applied to the + // default credential chain + cfg.MergeIn(cfgs...) + cfg.Credentials = defaults.CredChain(cfg, handlers) + + // Reapply any passed in configs to override credentials if set + cfg.MergeIn(cfgs...) 
+ s := &Session{ - Config: def.Config, - Handlers: def.Handlers, + Config: cfg, + Handlers: handlers, } - s.Config.MergeIn(cfgs...) initHandlers(s) diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index b86f36842..afe2bb1c0 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.0.6" +const SDKVersion = "1.0.11" diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json index ea819b1ec..7819eedc3 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json @@ -78,6 +78,9 @@ "ap-northeast-1/s3": { "endpoint": "s3-{region}.amazonaws.com" }, + "ap-northeast-2/s3": { + "endpoint": "s3-{region}.amazonaws.com" + }, "sa-east-1/s3": { "endpoint": "s3-{region}.amazonaws.com" }, diff --git a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go index 3fab91c7f..9b2e1b699 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ b/vendor/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go @@ -57,6 +57,9 @@ var endpointsMap = endpointStruct{ "ap-northeast-1/s3": { Endpoint: "s3-{region}.amazonaws.com", }, + "ap-northeast-2/s3": { + Endpoint: "s3-{region}.amazonaws.com", + }, "ap-southeast-1/s3": { Endpoint: "s3-{region}.amazonaws.com", }, diff --git a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go index 27f47b02c..46837f66c 100644 --- 
a/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ b/vendor/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go @@ -26,6 +26,10 @@ func Unmarshal(r *request.Request) { // UnmarshalMeta unmarshals the REST metadata of a response in a REST service func UnmarshalMeta(r *request.Request) { r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") + if r.RequestID == "" { + // Alternative version of request id in the header + r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") + } if r.DataFilled() { v := reflect.Indirect(reflect.ValueOf(r.Data)) unmarshalLocationElements(r, v) diff --git a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go index e5fb13666..b51e9449c 100644 --- a/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go +++ b/vendor/github.com/aws/aws-sdk-go/private/waiter/waiter.go @@ -51,17 +51,15 @@ func (w *Waiter) Wait() error { err := req.Send() for _, a := range w.Acceptors { - if err != nil && a.Matcher != "error" { - // Only matcher error is valid if there is a request error - continue - } - result := false var vals []interface{} switch a.Matcher { case "pathAll", "path": // Require all matches to be equal for result to match vals, _ = awsutil.ValuesAtPath(req.Data, a.Argument) + if len(vals) == 0 { + break + } result = true for _, val := range vals { if !awsutil.DeepEqual(val, a.Expected) { diff --git a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go index 4eab48071..cd018821b 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/ec2/api.go @@ -691,7 +691,7 @@ func (c *EC2) CancelReservedInstancesListingRequest(input *CancelReservedInstanc return } -// Cancels the specified Reserved instance listing in the Reserved Instance +// Cancels the 
specified Reserved Instance listing in the Reserved Instance // Marketplace. // // For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) @@ -1341,22 +1341,22 @@ func (c *EC2) CreateReservedInstancesListingRequest(input *CreateReservedInstanc return } -// Creates a listing for Amazon EC2 Reserved instances to be sold in the Reserved -// Instance Marketplace. You can submit one Reserved instance listing at a time. -// To get a list of your Reserved instances, you can use the DescribeReservedInstances +// Creates a listing for Amazon EC2 Reserved Instances to be sold in the Reserved +// Instance Marketplace. You can submit one Reserved Instance listing at a time. +// To get a list of your Reserved Instances, you can use the DescribeReservedInstances // operation. // // The Reserved Instance Marketplace matches sellers who want to resell Reserved -// instance capacity that they no longer need with buyers who want to purchase -// additional capacity. Reserved instances bought and sold through the Reserved -// Instance Marketplace work like any other Reserved instances. +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved +// Instance Marketplace work like any other Reserved Instances. // -// To sell your Reserved instances, you must first register as a seller in +// To sell your Reserved Instances, you must first register as a seller in // the Reserved Instance Marketplace. After completing the registration process, // you can create a Reserved Instance Marketplace listing of some or all of -// your Reserved instances, and specify the upfront price to receive for them. -// Your Reserved instance listings then become available for purchase. 
To view -// the details of your Reserved instance listing, you can use the DescribeReservedInstancesListings +// your Reserved Instances, and specify the upfront price to receive for them. +// Your Reserved Instance listings then become available for purchase. To view +// the details of your Reserved Instance listing, you can use the DescribeReservedInstancesListings // operation. // // For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) @@ -1475,7 +1475,7 @@ func (c *EC2) CreateSecurityGroupRequest(input *CreateSecurityGroupInput) (req * // // EC2-Classic: You can have up to 500 security groups. // -// EC2-VPC: You can create up to 100 security groups per VPC. +// EC2-VPC: You can create up to 500 security groups per VPC. // // When you create a security group, you specify a friendly name of your choice. // You can have a security group for use in EC2-Classic with the same name as @@ -3041,21 +3041,20 @@ func (c *EC2) DescribeIdFormatRequest(input *DescribeIdFormatInput) (req *reques return } -// Important: This command is reserved for future use, and is currently not -// available for you to use. -// -// Describes the ID format settings for your resources, for example, to view -// which resource types are enabled for longer IDs. This request only returns -// information about resource types whose ID formats can be modified; it does -// not return information about other resource types. +// Describes the ID format settings for your resources on a per-region basis, +// for example, to view which resource types are enabled for longer IDs. This +// request only returns information about resource types whose ID formats can +// be modified; it does not return information about other resource types. // // The following resource types support longer IDs: instance | reservation. // // These settings apply to the IAM user who makes the request; they do not // apply to the entire AWS account. 
By default, an IAM user defaults to the // same settings as the root user, unless they explicitly override the settings -// by running the ModifyIdFormat command. These settings are applied on a per-region -// basis. +// by running the ModifyIdFormat command. Resources created with longer IDs +// are visible to all IAM users, regardless of these settings and provided that +// they have permission to use the relevant Describe command for the resource +// type. func (c *EC2) DescribeIdFormat(input *DescribeIdFormatInput) (*DescribeIdFormatOutput, error) { req, out := c.DescribeIdFormatRequest(input) err := req.Send() @@ -3627,9 +3626,9 @@ func (c *EC2) DescribeReservedInstancesRequest(input *DescribeReservedInstancesI return } -// Describes one or more of the Reserved instances that you purchased. +// Describes one or more of the Reserved Instances that you purchased. // -// For more information about Reserved instances, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) +// For more information about Reserved Instances, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) // in the Amazon Elastic Compute Cloud User Guide. func (c *EC2) DescribeReservedInstances(input *DescribeReservedInstancesInput) (*DescribeReservedInstancesOutput, error) { req, out := c.DescribeReservedInstancesRequest(input) @@ -3657,22 +3656,22 @@ func (c *EC2) DescribeReservedInstancesListingsRequest(input *DescribeReservedIn return } -// Describes your account's Reserved instance listings in the Reserved Instance +// Describes your account's Reserved Instance listings in the Reserved Instance // Marketplace. // // The Reserved Instance Marketplace matches sellers who want to resell Reserved -// instance capacity that they no longer need with buyers who want to purchase -// additional capacity. 
Reserved instances bought and sold through the Reserved +// Instance capacity that they no longer need with buyers who want to purchase +// additional capacity. Reserved Instances bought and sold through the Reserved // Instance Marketplace work like any other Reserved Instances. // -// As a seller, you choose to list some or all of your Reserved instances, -// and you specify the upfront price to receive for them. Your Reserved instances +// As a seller, you choose to list some or all of your Reserved Instances, +// and you specify the upfront price to receive for them. Your Reserved Instances // are then listed in the Reserved Instance Marketplace and are available for // purchase. // -// As a buyer, you specify the configuration of the Reserved instance to purchase, +// As a buyer, you specify the configuration of the Reserved Instance to purchase, // and the Marketplace matches what you're searching for with what's available. -// The Marketplace first sells the lowest priced Reserved instances to you, +// The Marketplace first sells the lowest priced Reserved Instances to you, // and continues to sell available Reserved Instance listings to you until your // demand is met. You are charged based on the total price of all of the listings // that you purchase. @@ -3711,8 +3710,8 @@ func (c *EC2) DescribeReservedInstancesModificationsRequest(input *DescribeReser return } -// Describes the modifications made to your Reserved instances. If no parameter -// is specified, information about all your Reserved instances modification +// Describes the modifications made to your Reserved Instances. If no parameter +// is specified, information about all your Reserved Instances modification // requests is returned. If a modification ID is specified, only information // about the specific modification is returned. 
// @@ -3758,15 +3757,15 @@ func (c *EC2) DescribeReservedInstancesOfferingsRequest(input *DescribeReservedI return } -// Describes Reserved instance offerings that are available for purchase. With -// Reserved instances, you purchase the right to launch instances for a period +// Describes Reserved Instance offerings that are available for purchase. With +// Reserved Instances, you purchase the right to launch instances for a period // of time. During that time period, you do not receive insufficient capacity // errors, and you pay a lower usage rate than the rate charged for On-Demand // instances for the actual time used. // -// If you have listed your own Reserved instances for sale in the Reserved +// If you have listed your own Reserved Instances for sale in the Reserved // Instance Marketplace, they will be excluded from these results. This is to -// ensure that you do not purchase your own Reserved instances. +// ensure that you do not purchase your own Reserved Instances. // // For more information, see Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) // in the Amazon Elastic Compute Cloud User Guide. @@ -3819,6 +3818,68 @@ func (c *EC2) DescribeRouteTables(input *DescribeRouteTablesInput) (*DescribeRou return out, err } +const opDescribeScheduledInstanceAvailability = "DescribeScheduledInstanceAvailability" + +// DescribeScheduledInstanceAvailabilityRequest generates a request for the DescribeScheduledInstanceAvailability operation. 
+func (c *EC2) DescribeScheduledInstanceAvailabilityRequest(input *DescribeScheduledInstanceAvailabilityInput) (req *request.Request, output *DescribeScheduledInstanceAvailabilityOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstanceAvailability, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScheduledInstanceAvailabilityInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledInstanceAvailabilityOutput{} + req.Data = output + return +} + +// Finds available schedules that meet the specified criteria. +// +// You can search for an available schedule no more than 3 months in advance. +// You must meet the minimum required duration of 1,200 hours per year. For +// example, the minimum daily schedule is 4 hours, the minimum weekly schedule +// is 24 hours, and the minimum monthly schedule is 100 hours. +// +// After you find a schedule that meets your needs, call PurchaseScheduledInstances +// to purchase Scheduled Instances with that schedule. +func (c *EC2) DescribeScheduledInstanceAvailability(input *DescribeScheduledInstanceAvailabilityInput) (*DescribeScheduledInstanceAvailabilityOutput, error) { + req, out := c.DescribeScheduledInstanceAvailabilityRequest(input) + err := req.Send() + return out, err +} + +const opDescribeScheduledInstances = "DescribeScheduledInstances" + +// DescribeScheduledInstancesRequest generates a request for the DescribeScheduledInstances operation. +func (c *EC2) DescribeScheduledInstancesRequest(input *DescribeScheduledInstancesInput) (req *request.Request, output *DescribeScheduledInstancesOutput) { + op := &request.Operation{ + Name: opDescribeScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeScheduledInstancesOutput{} + req.Data = output + return +} + +// Describes one or more of your Scheduled Instances. 
+func (c *EC2) DescribeScheduledInstances(input *DescribeScheduledInstancesInput) (*DescribeScheduledInstancesOutput, error) { + req, out := c.DescribeScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + const opDescribeSecurityGroups = "DescribeSecurityGroups" // DescribeSecurityGroupsRequest generates a request for the DescribeSecurityGroups operation. @@ -4452,6 +4513,39 @@ func (c *EC2) DescribeVpcClassicLink(input *DescribeVpcClassicLinkInput) (*Descr return out, err } +const opDescribeVpcClassicLinkDnsSupport = "DescribeVpcClassicLinkDnsSupport" + +// DescribeVpcClassicLinkDnsSupportRequest generates a request for the DescribeVpcClassicLinkDnsSupport operation. +func (c *EC2) DescribeVpcClassicLinkDnsSupportRequest(input *DescribeVpcClassicLinkDnsSupportInput) (req *request.Request, output *DescribeVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDescribeVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DescribeVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &DescribeVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Describes the ClassicLink DNS support status of one or more VPCs. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DescribeVpcClassicLinkDnsSupport(input *DescribeVpcClassicLinkDnsSupportInput) (*DescribeVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DescribeVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + const opDescribeVpcEndpointServices = "DescribeVpcEndpointServices" // DescribeVpcEndpointServicesRequest generates a request for the DescribeVpcEndpointServices operation. @@ -4837,6 +4931,37 @@ func (c *EC2) DisableVpcClassicLink(input *DisableVpcClassicLinkInput) (*Disable return out, err } +const opDisableVpcClassicLinkDnsSupport = "DisableVpcClassicLinkDnsSupport" + +// DisableVpcClassicLinkDnsSupportRequest generates a request for the DisableVpcClassicLinkDnsSupport operation. +func (c *EC2) DisableVpcClassicLinkDnsSupportRequest(input *DisableVpcClassicLinkDnsSupportInput) (req *request.Request, output *DisableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opDisableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &DisableVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &DisableVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Disables ClassicLink DNS support for a VPC. If disabled, DNS hostnames resolve +// to public IP addresses when addressed between a linked EC2-Classic instance +// and instances in the VPC to which it's linked. For more information about +// ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) DisableVpcClassicLinkDnsSupport(input *DisableVpcClassicLinkDnsSupportInput) (*DisableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.DisableVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + const opDisassociateAddress = "DisassociateAddress" // DisassociateAddressRequest generates a request for the DisassociateAddress operation. @@ -4993,6 +5118,39 @@ func (c *EC2) EnableVpcClassicLink(input *EnableVpcClassicLinkInput) (*EnableVpc return out, err } +const opEnableVpcClassicLinkDnsSupport = "EnableVpcClassicLinkDnsSupport" + +// EnableVpcClassicLinkDnsSupportRequest generates a request for the EnableVpcClassicLinkDnsSupport operation. +func (c *EC2) EnableVpcClassicLinkDnsSupportRequest(input *EnableVpcClassicLinkDnsSupportInput) (req *request.Request, output *EnableVpcClassicLinkDnsSupportOutput) { + op := &request.Operation{ + Name: opEnableVpcClassicLinkDnsSupport, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &EnableVpcClassicLinkDnsSupportInput{} + } + + req = c.newRequest(op, input, output) + output = &EnableVpcClassicLinkDnsSupportOutput{} + req.Data = output + return +} + +// Enables a VPC to support DNS hostname resolution for ClassicLink. If enabled, +// the DNS hostname of a linked EC2-Classic instance resolves to its private +// IP address when addressed from an instance in the VPC to which it's linked. +// Similarly, the DNS hostname of an instance in a VPC resolves to its private +// IP address when addressed from a linked EC2-Classic instance. For more information +// about ClassicLink, see ClassicLink (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/vpc-classiclink.html) +// in the Amazon Elastic Compute Cloud User Guide. 
+func (c *EC2) EnableVpcClassicLinkDnsSupport(input *EnableVpcClassicLinkDnsSupportInput) (*EnableVpcClassicLinkDnsSupportOutput, error) { + req, out := c.EnableVpcClassicLinkDnsSupportRequest(input) + err := req.Send() + return out, err +} + const opGetConsoleOutput = "GetConsoleOutput" // GetConsoleOutputRequest generates a request for the GetConsoleOutput operation. @@ -5284,17 +5442,17 @@ func (c *EC2) ModifyIdFormatRequest(input *ModifyIdFormatInput) (req *request.Re return } -// Important: This command is reserved for future use, and is currently not -// available for you to use. -// -// Modifies the ID format for the specified resource. You can specify that -// resources should receive longer IDs (17-character IDs) when they are created. -// The following resource types support longer IDs: instance | reservation. +// Modifies the ID format for the specified resource on a per-region basis. +// You can specify that resources should receive longer IDs (17-character IDs) +// when they are created. The following resource types support longer IDs: instance +// | reservation. // // This setting applies to the IAM user who makes the request; it does not // apply to the entire AWS account. By default, an IAM user defaults to the // same settings as the root user, unless they explicitly override the settings -// by running this request. These settings are applied on a per-region basis. +// by running this request. Resources created with longer IDs are visible to +// all IAM users, regardless of these settings and provided that they have permission +// to use the relevant Describe command for the resource type. 
func (c *EC2) ModifyIdFormat(input *ModifyIdFormatInput) (*ModifyIdFormatOutput, error) { req, out := c.ModifyIdFormatRequest(input) err := req.Send() @@ -5458,8 +5616,8 @@ func (c *EC2) ModifyReservedInstancesRequest(input *ModifyReservedInstancesInput } // Modifies the Availability Zone, instance count, instance type, or network -// platform (EC2-Classic or EC2-VPC) of your Reserved instances. The Reserved -// instances to be modified must be identical, except for Availability Zone, +// platform (EC2-Classic or EC2-VPC) of your Reserved Instances. The Reserved +// Instances to be modified must be identical, except for Availability Zone, // network platform, and instance type. // // For more information, see Modifying Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-modifying.html) @@ -5722,12 +5880,14 @@ func (c *EC2) MoveAddressToVpcRequest(input *MoveAddressToVpcInput) (req *reques } // Moves an Elastic IP address from the EC2-Classic platform to the EC2-VPC -// platform. The Elastic IP address must be allocated to your account, and it -// must not be associated with an instance. After the Elastic IP address is -// moved, it is no longer available for use in the EC2-Classic platform, unless -// you move it back using the RestoreAddressToClassic request. You cannot move -// an Elastic IP address that's allocated for use in the EC2-VPC platform to -// the EC2-Classic platform. +// platform. The Elastic IP address must be allocated to your account for more +// than 24 hours, and it must not be associated with an instance. After the +// Elastic IP address is moved, it is no longer available for use in the EC2-Classic +// platform, unless you move it back using the RestoreAddressToClassic request. +// You cannot move an Elastic IP address that's allocated for use in the EC2-VPC +// platform to the EC2-Classic platform. You cannot migrate an Elastic IP address +// that's associated with a reverse DNS record. 
Contact AWS account and billing +// support to remove the reverse DNS record. func (c *EC2) MoveAddressToVpc(input *MoveAddressToVpcInput) (*MoveAddressToVpcOutput, error) { req, out := c.MoveAddressToVpcRequest(input) err := req.Send() @@ -5754,14 +5914,14 @@ func (c *EC2) PurchaseReservedInstancesOfferingRequest(input *PurchaseReservedIn return } -// Purchases a Reserved instance for use with your account. With Amazon EC2 -// Reserved instances, you obtain a capacity reservation for a certain instance -// configuration over a specified period of time and pay a lower hourly rate -// compared to On-Demand Instance pricing. +// Purchases a Reserved Instance for use with your account. With Reserved Instances, +// you obtain a capacity reservation for a certain instance configuration over +// a specified period of time and pay a lower hourly rate compared to On-Demand +// instance pricing. // -// Use DescribeReservedInstancesOfferings to get a list of Reserved instance +// Use DescribeReservedInstancesOfferings to get a list of Reserved Instance // offerings that match your specifications. After you've purchased a Reserved -// instance, you can check for your new Reserved instance with DescribeReservedInstances. +// Instance, you can check for your new Reserved Instance with DescribeReservedInstances. // // For more information, see Reserved Instances (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/concepts-on-demand-reserved-instances.html) // and Reserved Instance Marketplace (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ri-market-general.html) @@ -5772,6 +5932,38 @@ func (c *EC2) PurchaseReservedInstancesOffering(input *PurchaseReservedInstances return out, err } +const opPurchaseScheduledInstances = "PurchaseScheduledInstances" + +// PurchaseScheduledInstancesRequest generates a request for the PurchaseScheduledInstances operation. 
+func (c *EC2) PurchaseScheduledInstancesRequest(input *PurchaseScheduledInstancesInput) (req *request.Request, output *PurchaseScheduledInstancesOutput) { + op := &request.Operation{ + Name: opPurchaseScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &PurchaseScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &PurchaseScheduledInstancesOutput{} + req.Data = output + return +} + +// Purchases one or more Scheduled Instances with the specified schedule. +// +// Scheduled Instances enable you to purchase Amazon EC2 compute capacity by +// the hour for a one-year term. Before you can purchase a Scheduled Instance, +// you must call DescribeScheduledInstanceAvailability to check for available +// schedules and obtain a purchase token. +func (c *EC2) PurchaseScheduledInstances(input *PurchaseScheduledInstancesInput) (*PurchaseScheduledInstancesOutput, error) { + req, out := c.PurchaseScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + const opRebootInstances = "RebootInstances" // RebootInstancesRequest generates a request for the RebootInstances operation. @@ -6358,7 +6550,9 @@ func (c *EC2) RestoreAddressToClassicRequest(input *RestoreAddressToClassicInput // Restores an Elastic IP address that was previously moved to the EC2-VPC platform // back to the EC2-Classic platform. You cannot move an Elastic IP address that // was originally allocated for use in EC2-VPC. The Elastic IP address must -// not be associated with an instance or network interface. +// not be associated with an instance or network interface. You cannot restore +// an Elastic IP address that's associated with a reverse DNS record. Contact +// AWS account and billing support to remove the reverse DNS record. 
func (c *EC2) RestoreAddressToClassic(input *RestoreAddressToClassicInput) (*RestoreAddressToClassicOutput, error) { req, out := c.RestoreAddressToClassicRequest(input) err := req.Send() @@ -6505,6 +6699,41 @@ func (c *EC2) RunInstances(input *RunInstancesInput) (*Reservation, error) { return out, err } +const opRunScheduledInstances = "RunScheduledInstances" + +// RunScheduledInstancesRequest generates a request for the RunScheduledInstances operation. +func (c *EC2) RunScheduledInstancesRequest(input *RunScheduledInstancesInput) (req *request.Request, output *RunScheduledInstancesOutput) { + op := &request.Operation{ + Name: opRunScheduledInstances, + HTTPMethod: "POST", + HTTPPath: "/", + } + + if input == nil { + input = &RunScheduledInstancesInput{} + } + + req = c.newRequest(op, input, output) + output = &RunScheduledInstancesOutput{} + req.Data = output + return +} + +// Launches the specified Scheduled Instances. +// +// Before you can launch a Scheduled Instance, you must purchase it and obtain +// an identifier using PurchaseScheduledInstances. +// +// You must launch a Scheduled Instance during its scheduled time period. You +// can't stop or reboot a Scheduled Instance, but you can terminate it as needed. +// If you terminate a Scheduled Instance before the current scheduled time period +// ends, you can launch it again after a few minutes. +func (c *EC2) RunScheduledInstances(input *RunScheduledInstancesInput) (*RunScheduledInstancesOutput, error) { + req, out := c.RunScheduledInstancesRequest(input) + err := req.Send() + return out, err +} + const opStartInstances = "StartInstances" // StartInstancesRequest generates a request for the StartInstances operation. @@ -7938,7 +8167,7 @@ func (s CancelImportTaskOutput) GoString() string { type CancelReservedInstancesListingInput struct { _ struct{} `type:"structure"` - // The ID of the Reserved instance listing. + // The ID of the Reserved Instance listing. 
ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string" required:"true"` } @@ -7955,7 +8184,7 @@ func (s CancelReservedInstancesListingInput) GoString() string { type CancelReservedInstancesListingOutput struct { _ struct{} `type:"structure"` - // The Reserved instance listing. + // The Reserved Instance listing. ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` } @@ -8147,6 +8376,27 @@ func (s CancelledSpotInstanceRequest) GoString() string { return s.String() } +// Describes the ClassicLink DNS support status of a VPC. +type ClassicLinkDnsSupport struct { + _ struct{} `type:"structure"` + + // Indicates whether ClassicLink DNS support is enabled for the VPC. + ClassicLinkDnsSupported *bool `locationName:"classicLinkDnsSupported" type:"boolean"` + + // The ID of the VPC. + VpcId *string `locationName:"vpcId" type:"string"` +} + +// String returns the string representation +func (s ClassicLinkDnsSupport) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ClassicLinkDnsSupport) GoString() string { + return s.String() +} + // Describes a linked EC2-Classic instance. type ClassicLinkInstance struct { _ struct{} `type:"structure"` @@ -9053,17 +9303,17 @@ type CreateReservedInstancesListingInput struct { // Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string" required:"true"` - // The number of instances that are a part of a Reserved instance account to + // The number of instances that are a part of a Reserved Instance account to // be listed in the Reserved Instance Marketplace. 
This number should be less - // than or equal to the instance count associated with the Reserved instance + // than or equal to the instance count associated with the Reserved Instance // ID specified in this call. InstanceCount *int64 `locationName:"instanceCount" type:"integer" required:"true"` - // A list specifying the price of the Reserved instance for each month remaining - // in the Reserved instance term. + // A list specifying the price of the Reserved Instance for each month remaining + // in the Reserved Instance term. PriceSchedules []*PriceScheduleSpecification `locationName:"priceSchedules" locationNameList:"item" type:"list" required:"true"` - // The ID of the active Reserved instance. + // The ID of the active Reserved Instance. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string" required:"true"` } @@ -9080,7 +9330,7 @@ func (s CreateReservedInstancesListingInput) GoString() string { type CreateReservedInstancesListingOutput struct { _ struct{} `type:"structure"` - // Information about the Reserved instance listing. + // Information about the Reserved Instance listing. ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` } @@ -12146,7 +12396,7 @@ type DescribeInstancesInput struct { // // network-interface.mac-address - The MAC address of the network interface. // - // network-interface-private-dns-name - The private DNS name of the network + // network-interface.private-dns-name - The private DNS name of the network // interface. // // network-interface.source-dest-check - Whether the network interface performs @@ -12952,20 +13202,20 @@ type DescribeReservedInstancesInput struct { // One or more filters. // - // availability-zone - The Availability Zone where the Reserved instance + // availability-zone - The Availability Zone where the Reserved Instance // can be used. 
// - // duration - The duration of the Reserved instance (one year or three years), + // duration - The duration of the Reserved Instance (one year or three years), // in seconds (31536000 | 94608000). // - // end - The time when the Reserved instance expires (for example, 2015-08-07T11:54:42.000Z). + // end - The time when the Reserved Instance expires (for example, 2015-08-07T11:54:42.000Z). // - // fixed-price - The purchase price of the Reserved instance (for example, + // fixed-price - The purchase price of the Reserved Instance (for example, // 9800.0). // // instance-type - The instance type that is covered by the reservation. // - // product-description - The Reserved instance product platform description. + // product-description - The Reserved Instance product platform description. // Instances that include (Amazon VPC) in the product platform description will // only be displayed to EC2-Classic account holders and are for use with Amazon // VPC (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon @@ -12975,12 +13225,12 @@ type DescribeReservedInstancesInput struct { // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows // with SQL Server Enterprise (Amazon VPC)). // - // reserved-instances-id - The ID of the Reserved instance. + // reserved-instances-id - The ID of the Reserved Instance. // - // start - The time at which the Reserved instance purchase request was placed + // start - The time at which the Reserved Instance purchase request was placed // (for example, 2014-08-07T11:54:42.000Z). // - // state - The state of the Reserved instance (payment-pending | active | + // state - The state of the Reserved Instance (payment-pending | active | // payment-failed | retired). // // tag:key=value - The key/value combination of a tag assigned to the resource. @@ -12995,18 +13245,18 @@ type DescribeReservedInstancesInput struct { // tag-value - The value of a tag assigned to the resource. 
This filter is // independent of the tag-key filter. // - // usage-price - The usage price of the Reserved instance, per hour (for + // usage-price - The usage price of the Reserved Instance, per hour (for // example, 0.84). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` - // The Reserved instance offering type. If you are using tools that predate + // The Reserved Instance offering type. If you are using tools that predate // the 2011-11-01 API version, you only have access to the Medium Utilization - // Reserved instance offering type. + // Reserved Instance offering type. OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` - // One or more Reserved instance IDs. + // One or more Reserved Instance IDs. // - // Default: Describes all your Reserved instances, or only those otherwise + // Default: Describes all your Reserved Instances, or only those otherwise // specified. ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list"` } @@ -13026,20 +13276,20 @@ type DescribeReservedInstancesListingsInput struct { // One or more filters. // - // reserved-instances-id - The ID of the Reserved instances. + // reserved-instances-id - The ID of the Reserved Instances. // - // reserved-instances-listing-id - The ID of the Reserved instances listing. + // reserved-instances-listing-id - The ID of the Reserved Instances listing. // - // status - The status of the Reserved instance listing (pending | active + // status - The status of the Reserved Instance listing (pending | active // | cancelled | closed). // // status-message - The reason for the status. Filters []*Filter `locationName:"filters" locationNameList:"Filter" type:"list"` - // One or more Reserved instance IDs. + // One or more Reserved Instance IDs. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` - // One or more Reserved instance Listing IDs. 
+ // One or more Reserved Instance listing IDs. ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` } @@ -13056,7 +13306,7 @@ func (s DescribeReservedInstancesListingsInput) GoString() string { type DescribeReservedInstancesListingsOutput struct { _ struct{} `type:"structure"` - // Information about the Reserved instance listing. + // Information about the Reserved Instance listing. ReservedInstancesListings []*ReservedInstancesListing `locationName:"reservedInstancesListingsSet" locationNameList:"item" type:"list"` } @@ -13081,27 +13331,27 @@ type DescribeReservedInstancesModificationsInput struct { // // effective-date - The time when the modification becomes effective. // - // modification-result.reserved-instances-id - The ID for the Reserved instances + // modification-result.reserved-instances-id - The ID for the Reserved Instances // created as part of the modification request. This ID is only available when // the status of the modification is fulfilled. // // modification-result.target-configuration.availability-zone - The Availability - // Zone for the new Reserved instances. + // Zone for the new Reserved Instances. // // modification-result.target-configuration.instance-count - The number - // of new Reserved instances. + // of new Reserved Instances. // // modification-result.target-configuration.instance-type - The instance - // type of the new Reserved instances. + // type of the new Reserved Instances. // // modification-result.target-configuration.platform - The network platform - // of the new Reserved instances (EC2-Classic | EC2-VPC). + // of the new Reserved Instances (EC2-Classic | EC2-VPC). // - // reserved-instances-id - The ID of the Reserved instances modified. + // reserved-instances-id - The ID of the Reserved Instances modified. // // reserved-instances-modification-id - The ID of the modification request. 
// - // status - The status of the Reserved instances modification request (processing + // status - The status of the Reserved Instances modification request (processing // | fulfilled | failed). // // status-message - The reason for the status. @@ -13133,7 +13383,7 @@ type DescribeReservedInstancesModificationsOutput struct { // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` - // The Reserved instance modification information. + // The Reserved Instance modification information. ReservedInstancesModifications []*ReservedInstancesModification `locationName:"reservedInstancesModificationsSet" locationNameList:"item" type:"list"` } @@ -13150,7 +13400,7 @@ func (s DescribeReservedInstancesModificationsOutput) GoString() string { type DescribeReservedInstancesOfferingsInput struct { _ struct{} `type:"structure"` - // The Availability Zone in which the Reserved instance can be used. + // The Availability Zone in which the Reserved Instance can be used. AvailabilityZone *string `type:"string"` // Checks whether you have the required permissions for the action, without @@ -13161,13 +13411,13 @@ type DescribeReservedInstancesOfferingsInput struct { // One or more filters. // - // availability-zone - The Availability Zone where the Reserved instance + // availability-zone - The Availability Zone where the Reserved Instance // can be used. // - // duration - The duration of the Reserved instance (for example, one year + // duration - The duration of the Reserved Instance (for example, one year // or three years), in seconds (31536000 | 94608000). // - // fixed-price - The purchase price of the Reserved instance (for example, + // fixed-price - The purchase price of the Reserved Instance (for example, // 9800.0). // // instance-type - The instance type that is covered by the reservation. 
@@ -13176,7 +13426,7 @@ type DescribeReservedInstancesOfferingsInput struct { // When this filter is not used, which is the default behavior, all offerings // from both AWS and the Reserved Instance Marketplace are listed. // - // product-description - The Reserved instance product platform description. + // product-description - The Reserved Instance product platform description. // Instances that include (Amazon VPC) in the product platform description will // only be displayed to EC2-Classic account holders and are for use with Amazon // VPC. (Linux/UNIX | Linux/UNIX (Amazon VPC) | SUSE Linux | SUSE Linux (Amazon @@ -13186,18 +13436,18 @@ type DescribeReservedInstancesOfferingsInput struct { // with SQL Server Web (Amazon VPC) | Windows with SQL Server Enterprise | Windows // with SQL Server Enterprise (Amazon VPC)) // - // reserved-instances-offering-id - The Reserved instances' offering ID. + // reserved-instances-offering-id - The Reserved Instances offering ID. // - // usage-price - The usage price of the Reserved instance, per hour (for + // usage-price - The usage price of the Reserved Instance, per hour (for // example, 0.84). Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` // Include Reserved Instance Marketplace offerings in the response. IncludeMarketplace *bool `type:"boolean"` - // The tenancy of the instances covered by the reservation. A Reserved instance + // The tenancy of the instances covered by the reservation. A Reserved Instance // with a tenancy of dedicated is applied to instances that run in a VPC on - // single-tenant hardware (i.e., Dedicated instances). + // single-tenant hardware (i.e., Dedicated Instances). // // Default: default InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` @@ -13232,16 +13482,16 @@ type DescribeReservedInstancesOfferingsInput struct { // The token to retrieve the next page of results. 
NextToken *string `locationName:"nextToken" type:"string"` - // The Reserved instance offering type. If you are using tools that predate + // The Reserved Instance offering type. If you are using tools that predate // the 2011-11-01 API version, you only have access to the Medium Utilization - // Reserved instance offering type. + // Reserved Instance offering type. OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` - // The Reserved instance product platform description. Instances that include + // The Reserved Instance product platform description. Instances that include // (Amazon VPC) in the description are for use with Amazon VPC. ProductDescription *string `type:"string" enum:"RIProductDescription"` - // One or more Reserved instances offering IDs. + // One or more Reserved Instances offering IDs. ReservedInstancesOfferingIds []*string `locationName:"ReservedInstancesOfferingId" type:"list"` } @@ -13262,7 +13512,7 @@ type DescribeReservedInstancesOfferingsOutput struct { // when there are no more results to return. NextToken *string `locationName:"nextToken" type:"string"` - // A list of Reserved instances offerings. + // A list of Reserved Instances offerings. ReservedInstancesOfferings []*ReservedInstancesOffering `locationName:"reservedInstancesOfferingsSet" locationNameList:"item" type:"list"` } @@ -13279,7 +13529,7 @@ func (s DescribeReservedInstancesOfferingsOutput) GoString() string { type DescribeReservedInstancesOutput struct { _ struct{} `type:"structure"` - // A list of Reserved instances. + // A list of Reserved Instances. ReservedInstances []*ReservedInstances `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` } @@ -13392,6 +13642,150 @@ func (s DescribeRouteTablesOutput) GoString() string { return s.String() } +// Contains the parameters for DescribeScheduledInstanceAvailability. 
+type DescribeScheduledInstanceAvailabilityInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone (for example, us-west-2a). + // + // instance-type - The instance type (for example, c4.large). + // + // network-platform - The network platform (EC2-Classic or EC2-VPC). + // + // platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The time period for the first schedule to start. + FirstSlotStartTimeRange *SlotDateTimeRangeRequest `type:"structure" required:"true"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The maximum available duration, in hours. This value must be greater than + // MinSlotDurationInHours and less than 1,720. + MaxSlotDurationInHours *int64 `type:"integer"` + + // The minimum available duration, in hours. The minimum required duration is + // 1,200 hours per year. For example, the minimum daily schedule is 4 hours, + // the minimum weekly schedule is 24 hours, and the minimum monthly schedule + // is 100 hours. + MinSlotDurationInHours *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // The schedule recurrence. 
+ Recurrence *ScheduledInstanceRecurrenceRequest `type:"structure" required:"true"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeScheduledInstanceAvailability. +type DescribeScheduledInstanceAvailabilityOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the available Scheduled Instances. + ScheduledInstanceAvailabilitySet []*ScheduledInstanceAvailability `locationName:"scheduledInstanceAvailabilitySet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstanceAvailabilityOutput) GoString() string { + return s.String() +} + +// Contains the parameters for DescribeScheduledInstances. +type DescribeScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more filters. + // + // availability-zone - The Availability Zone (for example, us-west-2a). + // + // instance-type - The instance type (for example, c4.large). + // + // network-platform - The network platform (EC2-Classic or EC2-VPC). 
+ // + // platform - The platform (Linux/UNIX or Windows). + Filters []*Filter `locationName:"Filter" locationNameList:"Filter" type:"list"` + + // The maximum number of results to return in a single call. To retrieve the + // remaining results, make another call with the returned NextToken value. + MaxResults *int64 `type:"integer"` + + // The token for the next set of results. + NextToken *string `type:"string"` + + // One or more Scheduled Instance IDs. + ScheduledInstanceIds []*string `locationName:"ScheduledInstanceId" locationNameList:"ScheduledInstanceId" type:"list"` + + // The time period for the first schedule to start. + SlotStartTimeRange *SlotStartTimeRangeRequest `type:"structure"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of DescribeScheduledInstances. +type DescribeScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // The token required to retrieve the next set of results. This value is null + // when there are no more results to return. + NextToken *string `locationName:"nextToken" type:"string"` + + // Information about the Scheduled Instances. 
+ ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeScheduledInstancesOutput) GoString() string { + return s.String() +} + type DescribeSecurityGroupsInput struct { _ struct{} `type:"structure"` @@ -14552,15 +14946,61 @@ func (s DescribeVpcAttributeOutput) GoString() string { return s.String() } -type DescribeVpcClassicLinkInput struct { +type DescribeVpcClassicLinkDnsSupportInput struct { _ struct{} `type:"structure"` - // Checks whether you have the required permissions for the action, without - // actually making the request, and provides an error response. If you have - // the required permissions, the error response is DryRunOperation. Otherwise, - // it is UnauthorizedOperation. - DryRun *bool `locationName:"dryRun" type:"boolean"` - + // The maximum number of items to return for this request. The request returns + // a token that you can specify in a subsequent call to get the next set of + // results. + MaxResults *int64 `locationName:"maxResults" min:"5" type:"integer"` + + // The token for the next set of items to return. (You received this token from + // a prior call.) + NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // One or more VPC IDs. + VpcIds []*string `locationNameList:"VpcId" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // The token to use when requesting the next set of items. 
+ NextToken *string `locationName:"nextToken" min:"1" type:"string"` + + // Information about the ClassicLink DNS support status of the VPCs. + Vpcs []*ClassicLinkDnsSupport `locationName:"vpcs" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DescribeVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + +type DescribeVpcClassicLinkInput struct { + _ struct{} `type:"structure"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `locationName:"dryRun" type:"boolean"` + // One or more filters. // // is-classic-link-enabled - Whether the VPC is enabled for ClassicLink (true @@ -14979,7 +15419,8 @@ type DescribeVpnGatewaysInput struct { // // attachment.vpc-id - The ID of an attached VPC. // - // availability-zone - The Availability Zone for the virtual private gateway. + // availability-zone - The Availability Zone for the virtual private gateway + // (if applicable). // // state - The state of the virtual private gateway (pending | available // | deleting | deleted). @@ -15315,6 +15756,40 @@ func (s DisableVgwRoutePropagationOutput) GoString() string { return s.String() } +type DisableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. 
+ VpcId *string `type:"string"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type DisableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. + Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s DisableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + type DisableVpcClassicLinkInput struct { _ struct{} `type:"structure"` @@ -15711,6 +16186,40 @@ func (s EnableVolumeIOOutput) GoString() string { return s.String() } +type EnableVpcClassicLinkDnsSupportInput struct { + _ struct{} `type:"structure"` + + // The ID of the VPC. + VpcId *string `type:"string"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportInput) GoString() string { + return s.String() +} + +type EnableVpcClassicLinkDnsSupportOutput struct { + _ struct{} `type:"structure"` + + // Returns true if the request succeeds; otherwise, it returns an error. 
+ Return *bool `locationName:"return" type:"boolean"` +} + +// String returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s EnableVpcClassicLinkDnsSupportOutput) GoString() string { + return s.String() +} + type EnableVpcClassicLinkInput struct { _ struct{} `type:"structure"` @@ -16018,7 +16527,8 @@ type GetConsoleOutputOutput struct { // The ID of the instance. InstanceId *string `locationName:"instanceId" type:"string"` - // The console output, Base64 encoded. + // The console output, Base64 encoded. If using a command line tool, the tools + // decode the output for you. Output *string `locationName:"output" type:"string"` // The time the output was last updated. @@ -17186,14 +17696,14 @@ func (s InstanceCapacity) GoString() string { return s.String() } -// Describes a Reserved instance listing state. +// Describes a Reserved Instance listing state. type InstanceCount struct { _ struct{} `type:"structure"` - // The number of listed Reserved instances in the state specified by the state. + // The number of listed Reserved Instances in the state specified by the state. InstanceCount *int64 `locationName:"instanceCount" type:"integer"` - // The states of the listed Reserved instances. + // The states of the listed Reserved Instances. State *string `locationName:"state" type:"string" enum:"ListingState"` } @@ -18216,10 +18726,10 @@ type ModifyReservedInstancesInput struct { // modification request. For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). ClientToken *string `locationName:"clientToken" type:"string"` - // The IDs of the Reserved instances to modify. + // The IDs of the Reserved Instances to modify. 
ReservedInstancesIds []*string `locationName:"ReservedInstancesId" locationNameList:"ReservedInstancesId" type:"list" required:"true"` - // The configuration settings for the Reserved instances to modify. + // The configuration settings for the Reserved Instances to modify. TargetConfigurations []*ReservedInstancesConfiguration `locationName:"ReservedInstancesConfigurationSetItemType" locationNameList:"item" type:"list" required:"true"` } @@ -19155,15 +19665,15 @@ func (s PrefixListId) GoString() string { return s.String() } -// Describes the price for a Reserved instance. +// Describes the price for a Reserved Instance. type PriceSchedule struct { _ struct{} `type:"structure"` // The current price schedule, as determined by the term remaining for the Reserved - // instance in the listing. + // Instance in the listing. // // A specific price schedule is always in effect, but only one price schedule - // can be active at any time. Take, for example, a Reserved instance listing + // can be active at any time. Take, for example, a Reserved Instance listing // that has five months remaining in its term. When you specify price schedules // for five months and two months, this means that schedule 1, covering the // first three months of the remaining term, will be active during months 5, @@ -19171,7 +19681,7 @@ type PriceSchedule struct { // be active for months 2 and 1. Active *bool `locationName:"active" type:"boolean"` - // The currency for transacting the Reserved instance resale. At this time, + // The currency for transacting the Reserved Instance resale. At this time, // the only supported currency is USD. CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` @@ -19193,11 +19703,11 @@ func (s PriceSchedule) GoString() string { return s.String() } -// Describes the price for a Reserved instance. +// Describes the price for a Reserved Instance. 
type PriceScheduleSpecification struct { _ struct{} `type:"structure"` - // The currency for transacting the Reserved instance resale. At this time, + // The currency for transacting the Reserved Instance resale. At this time, // the only supported currency is USD. CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` @@ -19219,7 +19729,7 @@ func (s PriceScheduleSpecification) GoString() string { return s.String() } -// Describes a Reserved instance offering. +// Describes a Reserved Instance offering. type PricingDetail struct { _ struct{} `type:"structure"` @@ -19301,6 +19811,27 @@ func (s PropagatingVgw) GoString() string { return s.String() } +// Describes a request to purchase Scheduled Instances. +type PurchaseRequest struct { + _ struct{} `type:"structure"` + + // The number of instances. + InstanceCount *int64 `type:"integer"` + + // The purchase token. + PurchaseToken *string `type:"string"` +} + +// String returns the string representation +func (s PurchaseRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseRequest) GoString() string { + return s.String() +} + type PurchaseReservedInstancesOfferingInput struct { _ struct{} `type:"structure"` @@ -19310,15 +19841,15 @@ type PurchaseReservedInstancesOfferingInput struct { // it is UnauthorizedOperation. DryRun *bool `locationName:"dryRun" type:"boolean"` - // The number of Reserved instances to purchase. + // The number of Reserved Instances to purchase. InstanceCount *int64 `type:"integer" required:"true"` // Specified for Reserved Instance Marketplace offerings to limit the total - // order and ensure that the Reserved instances are not purchased at unexpected + // order and ensure that the Reserved Instances are not purchased at unexpected // prices. LimitPrice *ReservedInstanceLimitPrice `locationName:"limitPrice" type:"structure"` - // The ID of the Reserved instance offering to purchase. 
+ // The ID of the Reserved Instance offering to purchase. ReservedInstancesOfferingId *string `type:"string" required:"true"` } @@ -19335,7 +19866,7 @@ func (s PurchaseReservedInstancesOfferingInput) GoString() string { type PurchaseReservedInstancesOfferingOutput struct { _ struct{} `type:"structure"` - // The IDs of the purchased Reserved instances. + // The IDs of the purchased Reserved Instances. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` } @@ -19349,6 +19880,52 @@ func (s PurchaseReservedInstancesOfferingOutput) GoString() string { return s.String() } +// Contains the parameters for PurchaseScheduledInstances. +type PurchaseScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // One or more purchase requests. + PurchaseRequests []*PurchaseRequest `locationName:"PurchaseRequest" locationNameList:"PurchaseRequest" type:"list" required:"true"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of PurchaseScheduledInstances. +type PurchaseScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // Information about the Scheduled Instances. 
+ ScheduledInstanceSet []*ScheduledInstance `locationName:"scheduledInstanceSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s PurchaseScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s PurchaseScheduledInstancesOutput) GoString() string { + return s.String() +} + type RebootInstancesInput struct { _ struct{} `type:"structure"` @@ -20163,7 +20740,7 @@ func (s Reservation) GoString() string { return s.String() } -// Describes the limit price of a Reserved instance offering. +// Describes the limit price of a Reserved Instance offering. type ReservedInstanceLimitPrice struct { _ struct{} `type:"structure"` @@ -20186,57 +20763,57 @@ func (s ReservedInstanceLimitPrice) GoString() string { return s.String() } -// Describes a Reserved instance. +// Describes a Reserved Instance. type ReservedInstances struct { _ struct{} `type:"structure"` - // The Availability Zone in which the Reserved instance can be used. + // The Availability Zone in which the Reserved Instance can be used. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The currency of the Reserved instance. It's specified using ISO 4217 standard + // The currency of the Reserved Instance. It's specified using ISO 4217 standard // currency codes. At this time, the only supported currency is USD. CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` - // The duration of the Reserved instance, in seconds. + // The duration of the Reserved Instance, in seconds. Duration *int64 `locationName:"duration" type:"long"` - // The time when the Reserved instance expires. + // The time when the Reserved Instance expires. End *time.Time `locationName:"end" type:"timestamp" timestampFormat:"iso8601"` - // The purchase price of the Reserved instance. + // The purchase price of the Reserved Instance. 
FixedPrice *float64 `locationName:"fixedPrice" type:"float"` // The number of reservations purchased. InstanceCount *int64 `locationName:"instanceCount" type:"integer"` - // The tenancy of the reserved instance. + // The tenancy of the instance. InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` - // The instance type on which the Reserved instance can be used. + // The instance type on which the Reserved Instance can be used. InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` - // The Reserved instance offering type. + // The Reserved Instance offering type. OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` - // The Reserved instance product platform description. + // The Reserved Instance product platform description. ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` // The recurring charge tag assigned to the resource. RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` - // The ID of the Reserved instance. + // The ID of the Reserved Instance. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` - // The date and time the Reserved instance started. + // The date and time the Reserved Instance started. Start *time.Time `locationName:"start" type:"timestamp" timestampFormat:"iso8601"` - // The state of the Reserved instance purchase. + // The state of the Reserved Instance purchase. State *string `locationName:"state" type:"string" enum:"ReservedInstanceState"` // Any tags assigned to the resource. Tags []*Tag `locationName:"tagSet" locationNameList:"item" type:"list"` - // The usage price of the Reserved instance, per hour. + // The usage price of the Reserved Instance, per hour. 
UsagePrice *float64 `locationName:"usagePrice" type:"float"` } @@ -20250,20 +20827,20 @@ func (s ReservedInstances) GoString() string { return s.String() } -// Describes the configuration settings for the modified Reserved instances. +// Describes the configuration settings for the modified Reserved Instances. type ReservedInstancesConfiguration struct { _ struct{} `type:"structure"` - // The Availability Zone for the modified Reserved instances. + // The Availability Zone for the modified Reserved Instances. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The number of modified Reserved instances. + // The number of modified Reserved Instances. InstanceCount *int64 `locationName:"instanceCount" type:"integer"` - // The instance type for the modified Reserved instances. + // The instance type for the modified Reserved Instances. InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` - // The network platform of the modified Reserved instances, which is either + // The network platform of the modified Reserved Instances, which is either // EC2-Classic or EC2-VPC. Platform *string `locationName:"platform" type:"string"` } @@ -20278,11 +20855,11 @@ func (s ReservedInstancesConfiguration) GoString() string { return s.String() } -// Describes the ID of a Reserved instance. +// Describes the ID of a Reserved Instance. type ReservedInstancesId struct { _ struct{} `type:"structure"` - // The ID of the Reserved instance. + // The ID of the Reserved Instance. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` } @@ -20296,7 +20873,7 @@ func (s ReservedInstancesId) GoString() string { return s.String() } -// Describes a Reserved instance listing. +// Describes a Reserved Instance listing. type ReservedInstancesListing struct { _ struct{} `type:"structure"` @@ -20310,19 +20887,19 @@ type ReservedInstancesListing struct { // The number of instances in this state. 
InstanceCounts []*InstanceCount `locationName:"instanceCounts" locationNameList:"item" type:"list"` - // The price of the Reserved instance listing. + // The price of the Reserved Instance listing. PriceSchedules []*PriceSchedule `locationName:"priceSchedules" locationNameList:"item" type:"list"` - // The ID of the Reserved instance. + // The ID of the Reserved Instance. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` - // The ID of the Reserved instance listing. + // The ID of the Reserved Instance listing. ReservedInstancesListingId *string `locationName:"reservedInstancesListingId" type:"string"` - // The status of the Reserved instance listing. + // The status of the Reserved Instance listing. Status *string `locationName:"status" type:"string" enum:"ListingStatus"` - // The reason for the current status of the Reserved instance listing. The response + // The reason for the current status of the Reserved Instance listing. The response // can be blank. StatusMessage *string `locationName:"statusMessage" type:"string"` @@ -20343,7 +20920,7 @@ func (s ReservedInstancesListing) GoString() string { return s.String() } -// Describes a Reserved instance modification. +// Describes a Reserved Instance modification. type ReservedInstancesModification struct { _ struct{} `type:"structure"` @@ -20358,16 +20935,16 @@ type ReservedInstancesModification struct { EffectiveDate *time.Time `locationName:"effectiveDate" type:"timestamp" timestampFormat:"iso8601"` // Contains target configurations along with their corresponding new Reserved - // instance IDs. + // Instance IDs. ModificationResults []*ReservedInstancesModificationResult `locationName:"modificationResultSet" locationNameList:"item" type:"list"` - // The IDs of one or more Reserved instances. + // The IDs of one or more Reserved Instances. 
ReservedInstancesIds []*ReservedInstancesId `locationName:"reservedInstancesSet" locationNameList:"item" type:"list"` - // A unique ID for the Reserved instance modification. + // A unique ID for the Reserved Instance modification. ReservedInstancesModificationId *string `locationName:"reservedInstancesModificationId" type:"string"` - // The status of the Reserved instances modification request. + // The status of the Reserved Instances modification request. Status *string `locationName:"status" type:"string"` // The reason for the status. @@ -20390,11 +20967,11 @@ func (s ReservedInstancesModification) GoString() string { type ReservedInstancesModificationResult struct { _ struct{} `type:"structure"` - // The ID for the Reserved instances that were created as part of the modification + // The ID for the Reserved Instances that were created as part of the modification // request. This field is only available when the modification is fulfilled. ReservedInstancesId *string `locationName:"reservedInstancesId" type:"string"` - // The target Reserved instances configurations supplied as part of the modification + // The target Reserved Instances configurations supplied as part of the modification // request. TargetConfiguration *ReservedInstancesConfiguration `locationName:"targetConfiguration" type:"structure"` } @@ -20409,28 +20986,28 @@ func (s ReservedInstancesModificationResult) GoString() string { return s.String() } -// Describes a Reserved instance offering. +// Describes a Reserved Instance offering. type ReservedInstancesOffering struct { _ struct{} `type:"structure"` - // The Availability Zone in which the Reserved instance can be used. + // The Availability Zone in which the Reserved Instance can be used. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` - // The currency of the Reserved instance offering you are purchasing. It's specified + // The currency of the Reserved Instance offering you are purchasing. 
It's specified // using ISO 4217 standard currency codes. At this time, the only supported // currency is USD. CurrencyCode *string `locationName:"currencyCode" type:"string" enum:"CurrencyCodeValues"` - // The duration of the Reserved instance, in seconds. + // The duration of the Reserved Instance, in seconds. Duration *int64 `locationName:"duration" type:"long"` - // The purchase price of the Reserved instance. + // The purchase price of the Reserved Instance. FixedPrice *float64 `locationName:"fixedPrice" type:"float"` - // The tenancy of the reserved instance. + // The tenancy of the instance. InstanceTenancy *string `locationName:"instanceTenancy" type:"string" enum:"Tenancy"` - // The instance type on which the Reserved instance can be used. + // The instance type on which the Reserved Instance can be used. InstanceType *string `locationName:"instanceType" type:"string" enum:"InstanceType"` // Indicates whether the offering is available through the Reserved Instance @@ -20438,22 +21015,22 @@ type ReservedInstancesOffering struct { // this is true. Marketplace *bool `locationName:"marketplace" type:"boolean"` - // The Reserved instance offering type. + // The Reserved Instance offering type. OfferingType *string `locationName:"offeringType" type:"string" enum:"OfferingTypeValues"` - // The pricing details of the Reserved instance offering. + // The pricing details of the Reserved Instance offering. PricingDetails []*PricingDetail `locationName:"pricingDetailsSet" locationNameList:"item" type:"list"` - // The Reserved instance product platform description. + // The Reserved Instance product platform description. ProductDescription *string `locationName:"productDescription" type:"string" enum:"RIProductDescription"` // The recurring charge tag assigned to the resource. RecurringCharges []*RecurringCharge `locationName:"recurringCharges" locationNameList:"item" type:"list"` - // The ID of the Reserved instance offering. 
+ // The ID of the Reserved Instance offering. ReservedInstancesOfferingId *string `locationName:"reservedInstancesOfferingId" type:"string"` - // The usage price of the Reserved instance, per hour. + // The usage price of the Reserved Instance, per hour. UsagePrice *float64 `locationName:"usagePrice" type:"float"` } @@ -21060,7 +21637,12 @@ type RunInstancesInput struct { // [EC2-VPC] The ID of the subnet to launch the instance into. SubnetId *string `type:"string"` - // The Base64-encoded MIME user data for the instances. + // Data to configure the instance, or a script to run during instance launch. + // For more information, see Running Commands on Your Linux Instance at Launch + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html) (Linux) + // and Adding User Data (http://docs.aws.amazon.com/AWSEC2/latest/WindowsGuide/ec2-instance-metadata.html#instancedata-add-user-data) + // (Windows). For API calls, the text must be base64-encoded. Command line tools + // perform encoding for you. UserData *string `type:"string"` } @@ -21092,6 +21674,60 @@ func (s RunInstancesMonitoringEnabled) GoString() string { return s.String() } +// Contains the parameters for RunScheduledInstances. +type RunScheduledInstancesInput struct { + _ struct{} `type:"structure"` + + // Unique, case-sensitive identifier that ensures the idempotency of the request. + // For more information, see Ensuring Idempotency (http://docs.aws.amazon.com/AWSEC2/latest/APIReference/Run_Instance_Idempotency.html). + ClientToken *string `type:"string"` + + // Checks whether you have the required permissions for the action, without + // actually making the request, and provides an error response. If you have + // the required permissions, the error response is DryRunOperation. Otherwise, + // it is UnauthorizedOperation. + DryRun *bool `type:"boolean"` + + // The number of instances. + // + // Default: 1 + InstanceCount *int64 `type:"integer"` + + // The launch specification. 
+ LaunchSpecification *ScheduledInstancesLaunchSpecification `type:"structure" required:"true"` + + // The Scheduled Instance ID. + ScheduledInstanceId *string `type:"string" required:"true"` +} + +// String returns the string representation +func (s RunScheduledInstancesInput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunScheduledInstancesInput) GoString() string { + return s.String() +} + +// Contains the output of RunScheduledInstances. +type RunScheduledInstancesOutput struct { + _ struct{} `type:"structure"` + + // The IDs of the newly launched instances. + InstanceIdSet []*string `locationName:"instanceIdSet" locationNameList:"item" type:"list"` +} + +// String returns the string representation +func (s RunScheduledInstancesOutput) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s RunScheduledInstancesOutput) GoString() string { + return s.String() +} + // Describes the storage parameters for S3 and S3 buckets for an instance store-backed // AMI. type S3Storage struct { @@ -21128,6 +21764,477 @@ func (s S3Storage) GoString() string { return s.String() } +// Describes a Scheduled Instance. +type ScheduledInstance struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The date when the Scheduled Instance was purchased. + CreateDate *time.Time `locationName:"createDate" type:"timestamp" timestampFormat:"iso8601"` + + // The hourly price for a single instance. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The number of instances. + InstanceCount *int64 `locationName:"instanceCount" type:"integer"` + + // The instance type. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The network platform (EC2-Classic or EC2-VPC). 
+ NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The time for the next schedule to start. + NextSlotStartTime *time.Time `locationName:"nextSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The time that the previous schedule ended or will end. + PreviousSlotEndTime *time.Time `locationName:"previousSlotEndTime" type:"timestamp" timestampFormat:"iso8601"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The Scheduled Instance ID. + ScheduledInstanceId *string `locationName:"scheduledInstanceId" type:"string"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The end date for the Scheduled Instance. + TermEndDate *time.Time `locationName:"termEndDate" type:"timestamp" timestampFormat:"iso8601"` + + // The start date for the Scheduled Instance. + TermStartDate *time.Time `locationName:"termStartDate" type:"timestamp" timestampFormat:"iso8601"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstance) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstance) GoString() string { + return s.String() +} + +// Describes a schedule that is available for your Scheduled Instances. +type ScheduledInstanceAvailability struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `locationName:"availabilityZone" type:"string"` + + // The number of available instances. 
+ AvailableInstanceCount *int64 `locationName:"availableInstanceCount" type:"integer"` + + // The time period for the first schedule to start. + FirstSlotStartTime *time.Time `locationName:"firstSlotStartTime" type:"timestamp" timestampFormat:"iso8601"` + + // The hourly price for a single instance. + HourlyPrice *string `locationName:"hourlyPrice" type:"string"` + + // The instance type. You can specify one of the C3, C4, M4, or R3 instance + // types. + InstanceType *string `locationName:"instanceType" type:"string"` + + // The maximum term. The only possible value is 365 days. + MaxTermDurationInDays *int64 `locationName:"maxTermDurationInDays" type:"integer"` + + // The minimum term. The only possible value is 365 days. + MinTermDurationInDays *int64 `locationName:"minTermDurationInDays" type:"integer"` + + // The network platform (EC2-Classic or EC2-VPC). + NetworkPlatform *string `locationName:"networkPlatform" type:"string"` + + // The platform (Linux/UNIX or Windows). + Platform *string `locationName:"platform" type:"string"` + + // The purchase token. This token expires in two hours. + PurchaseToken *string `locationName:"purchaseToken" type:"string"` + + // The schedule recurrence. + Recurrence *ScheduledInstanceRecurrence `locationName:"recurrence" type:"structure"` + + // The number of hours in the schedule. + SlotDurationInHours *int64 `locationName:"slotDurationInHours" type:"integer"` + + // The total number of hours for a single instance for the entire term. + TotalScheduledInstanceHours *int64 `locationName:"totalScheduledInstanceHours" type:"integer"` +} + +// String returns the string representation +func (s ScheduledInstanceAvailability) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceAvailability) GoString() string { + return s.String() +} + +// Describes the recurring schedule for a Scheduled Instance. 
+type ScheduledInstanceRecurrence struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `locationName:"frequency" type:"string"` + + // The interval quantity. The interval unit depends on the value of frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `locationName:"interval" type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). + OccurrenceDaySet []*int64 `locationName:"occurrenceDaySet" locationNameList:"item" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. + OccurrenceRelativeToEnd *bool `locationName:"occurrenceRelativeToEnd" type:"boolean"` + + // The unit for occurrenceDaySet (DayOfWeek or DayOfMonth). + OccurrenceUnit *string `locationName:"occurrenceUnit" type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrence) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrence) GoString() string { + return s.String() +} + +// Describes the recurring schedule for a Scheduled Instance. +type ScheduledInstanceRecurrenceRequest struct { + _ struct{} `type:"structure"` + + // The frequency (Daily, Weekly, or Monthly). + Frequency *string `type:"string"` + + // The interval quantity. The interval unit depends on the value of Frequency. + // For example, every 2 weeks or every 2 months. + Interval *int64 `type:"integer"` + + // The days. For a monthly schedule, this is one or more days of the month (1-31). + // For a weekly schedule, this is one or more days of the week (1-7, where 1 + // is Sunday). You can't specify this value with a daily schedule. If the occurrence + // is relative to the end of the month, you can specify only a single day. 
+ OccurrenceDays []*int64 `locationName:"OccurrenceDay" locationNameList:"OccurenceDay" type:"list"` + + // Indicates whether the occurrence is relative to the end of the specified + // week or month. You can't specify this value with a daily schedule. + OccurrenceRelativeToEnd *bool `type:"boolean"` + + // The unit for OccurrenceDays (DayOfWeek or DayOfMonth). This value is required + // for a monthly schedule. You can't specify DayOfWeek with a weekly schedule. + // You can't specify this value with a daily schedule. + OccurrenceUnit *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstanceRecurrenceRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstanceRecurrenceRequest) GoString() string { + return s.String() +} + +// Describes a block device mapping for a Scheduled Instance. +type ScheduledInstancesBlockDeviceMapping struct { + _ struct{} `type:"structure"` + + // The device name exposed to the instance (for example, /dev/sdh or xvdh). + DeviceName *string `type:"string"` + + // Parameters used to set up EBS volumes automatically when the instance is + // launched. + Ebs *ScheduledInstancesEbs `type:"structure"` + + // Suppresses the specified device included in the block device mapping of the + // AMI. + NoDevice *string `type:"string"` + + // The virtual device name (ephemeralN). Instance store volumes are numbered + // starting from 0. An instance type with two available instance store volumes + // can specify mappings for ephemeral0 and ephemeral1.The number of available + // instance store volumes depends on the instance type. After you connect to + // the instance, you must mount the volume. + // + // Constraints: For M3 instances, you must specify instance store volumes in + // the block device mapping for the instance. 
When you launch an M3 instance, + // we ignore any instance store volumes specified in the block device mapping + // for the AMI. + VirtualName *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesBlockDeviceMapping) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesBlockDeviceMapping) GoString() string { + return s.String() +} + +// Describes an EBS volume for a Scheduled Instance. +type ScheduledInstancesEbs struct { + _ struct{} `type:"structure"` + + // Indicates whether the volume is deleted on instance termination. + DeleteOnTermination *bool `type:"boolean"` + + // Indicates whether the volume is encrypted. You can attached encrypted volumes + // only to instances that support them. + Encrypted *bool `type:"boolean"` + + // The number of I/O operations per second (IOPS) that the volume supports. + // For Provisioned IOPS (SSD) volumes, this represents the number of IOPS that + // are provisioned for the volume. For General Purpose (SSD) volumes, this represents + // the baseline performance of the volume and the rate at which the volume accumulates + // I/O credits for bursting. For more information about General Purpose (SSD) + // baseline performance, I/O credits, and bursting, see Amazon EBS Volume Types + // (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/EBSVolumeTypes.html) + // in the Amazon Elastic Compute Cloud User Guide. + // + // Constraint: Range is 100 to 20000 for Provisioned IOPS (SSD) volumes and + // 3 to 10000 for General Purpose (SSD) volumes. + // + // Condition: This parameter is required for requests to create io1 volumes; + // it is not used in requests to create standard or gp2 volumes. + Iops *int64 `type:"integer"` + + // The ID of the snapshot. + SnapshotId *string `type:"string"` + + // The size of the volume, in GiB. 
+ // + // Default: If you're creating the volume from a snapshot and don't specify + // a volume size, the default is the snapshot size. + VolumeSize *int64 `type:"integer"` + + // The volume type. gp2 for General Purpose (SSD) volumes, io1 for Provisioned + // IOPS (SSD) volumes, and standard for Magnetic volumes. + // + // Default: standard + VolumeType *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesEbs) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesEbs) GoString() string { + return s.String() +} + +// Describes an IAM instance profile for a Scheduled Instance. +type ScheduledInstancesIamInstanceProfile struct { + _ struct{} `type:"structure"` + + // The Amazon Resource Name (ARN). + Arn *string `type:"string"` + + // The name. + Name *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesIamInstanceProfile) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesIamInstanceProfile) GoString() string { + return s.String() +} + +// Describes the launch specification for a Scheduled Instance. +type ScheduledInstancesLaunchSpecification struct { + _ struct{} `type:"structure"` + + // One or more block device mapping entries. + BlockDeviceMappings []*ScheduledInstancesBlockDeviceMapping `locationName:"BlockDeviceMapping" locationNameList:"BlockDeviceMapping" type:"list"` + + // Indicates whether the instances are optimized for EBS I/O. This optimization + // provides dedicated throughput to Amazon EBS and an optimized configuration + // stack to provide optimal EBS I/O performance. This optimization isn't available + // with all instance types. Additional usage charges apply when using an EBS-optimized + // instance. + // + // Default: false + EbsOptimized *bool `type:"boolean"` + + // The IAM instance profile. 
+ IamInstanceProfile *ScheduledInstancesIamInstanceProfile `type:"structure"` + + // The ID of the Amazon Machine Image (AMI). + ImageId *string `type:"string" required:"true"` + + // The instance type. + InstanceType *string `type:"string"` + + // The ID of the kernel. + KernelId *string `type:"string"` + + // The name of the key pair. + KeyName *string `type:"string"` + + // Enable or disable monitoring for the instances. + Monitoring *ScheduledInstancesMonitoring `type:"structure"` + + // One or more network interfaces. + NetworkInterfaces []*ScheduledInstancesNetworkInterface `locationName:"NetworkInterface" locationNameList:"NetworkInterface" type:"list"` + + // The placement information. + Placement *ScheduledInstancesPlacement `type:"structure"` + + // The ID of the RAM disk. + RamdiskId *string `type:"string"` + + // The IDs of one or more security groups. + SecurityGroupIds []*string `locationName:"SecurityGroupId" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the subnet in which to launch the instances. + SubnetId *string `type:"string"` + + // The base64-encoded MIME user data. + UserData *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesLaunchSpecification) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesLaunchSpecification) GoString() string { + return s.String() +} + +// Describes whether monitoring is enabled for a Scheduled Instance. +type ScheduledInstancesMonitoring struct { + _ struct{} `type:"structure"` + + // Indicates whether monitoring is enabled. 
+ Enabled *bool `type:"boolean"` +} + +// String returns the string representation +func (s ScheduledInstancesMonitoring) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesMonitoring) GoString() string { + return s.String() +} + +// Describes a network interface for a Scheduled Instance. +type ScheduledInstancesNetworkInterface struct { + _ struct{} `type:"structure"` + + // Indicates whether to assign a public IP address to instances launched in + // a VPC. The public IP address can only be assigned to a network interface + // for eth0, and can only be assigned to a new network interface, not an existing + // one. You cannot specify more than one network interface in the request. If + // launching into a default subnet, the default value is true. + AssociatePublicIpAddress *bool `type:"boolean"` + + // Indicates whether to delete the interface when the instance is terminated. + DeleteOnTermination *bool `type:"boolean"` + + // The description. + Description *string `type:"string"` + + // The index of the device for the network interface attachment. + DeviceIndex *int64 `type:"integer"` + + // The IDs of one or more security groups. + Groups []*string `locationName:"Group" locationNameList:"SecurityGroupId" type:"list"` + + // The ID of the network interface. + NetworkInterfaceId *string `type:"string"` + + // The IP address of the network interface within the subnet. + PrivateIpAddress *string `type:"string"` + + // The private IP addresses. + PrivateIpAddressConfigs []*ScheduledInstancesPrivateIpAddressConfig `locationName:"PrivateIpAddressConfig" locationNameList:"PrivateIpAddressConfigSet" type:"list"` + + // The number of secondary private IP addresses. + SecondaryPrivateIpAddressCount *int64 `type:"integer"` + + // The ID of the subnet. 
+ SubnetId *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesNetworkInterface) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesNetworkInterface) GoString() string { + return s.String() +} + +// Describes the placement for a Scheduled Instance. +type ScheduledInstancesPlacement struct { + _ struct{} `type:"structure"` + + // The Availability Zone. + AvailabilityZone *string `type:"string"` + + // The name of the placement group. + GroupName *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesPlacement) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesPlacement) GoString() string { + return s.String() +} + +// Describes a private IP address for a Scheduled Instance. +type ScheduledInstancesPrivateIpAddressConfig struct { + _ struct{} `type:"structure"` + + // Indicates whether this is a primary IP address. Otherwise, this is a secondary + // IP address. + Primary *bool `type:"boolean"` + + // The IP address. + PrivateIpAddress *string `type:"string"` +} + +// String returns the string representation +func (s ScheduledInstancesPrivateIpAddressConfig) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s ScheduledInstancesPrivateIpAddressConfig) GoString() string { + return s.String() +} + // Describes a security group type SecurityGroup struct { _ struct{} `type:"structure"` @@ -21167,6 +22274,51 @@ func (s SecurityGroup) GoString() string { return s.String() } +// Describes the time period for a Scheduled Instance to start its first schedule. +// The time period must span less than one day. +type SlotDateTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. 
+ EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. This + // value must be later than or equal to the earliest date and at most three + // months in the future. + LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601" required:"true"` +} + +// String returns the string representation +func (s SlotDateTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotDateTimeRangeRequest) GoString() string { + return s.String() +} + +// Describes the time period for a Scheduled Instance to start its first schedule. +type SlotStartTimeRangeRequest struct { + _ struct{} `type:"structure"` + + // The earliest date and time, in UTC, for the Scheduled Instance to start. + EarliestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` + + // The latest date and time, in UTC, for the Scheduled Instance to start. + LatestTime *time.Time `type:"timestamp" timestampFormat:"iso8601"` +} + +// String returns the string representation +func (s SlotStartTimeRangeRequest) String() string { + return awsutil.Prettify(s) +} + +// GoString returns the string representation +func (s SlotStartTimeRangeRequest) GoString() string { + return s.String() +} + // Describes a snapshot. type Snapshot struct { _ struct{} `type:"structure"` @@ -21826,7 +22978,8 @@ type StateReason struct { // Client.UserInitiatedShutdown: The instance was shut down using the Amazon // EC2 API. // - // Client.VolumeLimitExceeded: The volume limit was exceeded. + // Client.VolumeLimitExceeded: The limit on the number of EBS volumes or total + // storage was exceeded. Decrease usage or request an increase in your limits. // // Client.InvalidSnapshot.NotFound: The specified snapshot was not found. 
Message *string `locationName:"message" type:"string"` @@ -22811,7 +23964,8 @@ func (s VpnConnectionOptionsSpecification) GoString() string { type VpnGateway struct { _ struct{} `type:"structure"` - // The Availability Zone where the virtual private gateway was created. + // The Availability Zone where the virtual private gateway was created, if applicable. + // This field may be empty or not returned. AvailabilityZone *string `locationName:"availabilityZone" type:"string"` // The current state of the virtual private gateway. @@ -23242,6 +24396,8 @@ const ( // @enum InstanceType InstanceTypeM410xlarge = "m4.10xlarge" // @enum InstanceType + InstanceTypeT2Nano = "t2.nano" + // @enum InstanceType InstanceTypeT2Micro = "t2.micro" // @enum InstanceType InstanceTypeT2Small = "t2.small" diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go index be8ffb4f5..eef0f2612 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/api.go @@ -2592,7 +2592,8 @@ type AutoScalingThresholds struct { // Allowing AWS OpsWorks to Act on Your Behalf (http://docs.aws.amazon.com/opsworks/latest/userguide/opsworks-security-servicerole.html). Alarms []*string `type:"list"` - // The CPU utilization threshold, as a percent of the available CPU. + // The CPU utilization threshold, as a percent of the available CPU. A value + // of -1 disables the threshold. CpuThreshold *float64 `type:"double"` // The amount of time (in minutes) after a scaling event occurs that AWS OpsWorks @@ -2608,11 +2609,12 @@ type AutoScalingThresholds struct { // The number of instances to add or remove when the load exceeds a threshold. InstanceCount *int64 `type:"integer"` - // The load threshold. For more information about how load is computed, see - // Load (computing) (http://en.wikipedia.org/wiki/Load_%28computing%29). + // The load threshold. 
A value of -1 disables the threshold. For more information + // about how load is computed, see Load (computing) (http://en.wikipedia.org/wiki/Load_%28computing%29). LoadThreshold *float64 `type:"double"` - // The memory utilization threshold, as a percent of the available memory. + // The memory utilization threshold, as a percent of the available memory. A + // value of -1 disables the threshold. MemoryThreshold *float64 `type:"double"` // The amount of time, in minutes, that the load must exceed a threshold before @@ -2716,9 +2718,10 @@ type CloneStackInput struct { // Whether to clone the source stack's permissions. ClonePermissions *bool `type:"boolean"` - // The configuration manager. When you clone a Linux stack we recommend that - // you use the configuration manager to specify the Chef version: 0.9, 11.4, - // or 11.10. The default value is currently 11.10. + // The configuration manager. When you clone a stack we recommend that you use + // the configuration manager to specify the Chef version: 12, 11.10, or 11.4 + // for Linux stacks, or 12.2 for Windows stacks. The default value for Linux + // stacks is currently 11.4. ConfigurationManager *StackConfigurationManager `type:"structure"` // Contains the information required to retrieve an app or cookbook from a repository. @@ -2751,7 +2754,7 @@ type CloneStackInput struct { // // A supported Linux operating system: An Amazon Linux version, such as Amazon // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify // the custom AMI you want to use when you create instances. For more information // on how to use custom AMIs with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // The default option is the parent stack's operating system. 
For more information @@ -3091,7 +3094,7 @@ type CreateInstanceInput struct { // The default AWS OpsWorks agent version. You have the following options: // - // INHERIT - Use the stack's default agent version setting. version_number + // INHERIT - Use the stack's default agent version setting. version_number // - Use the specified agent version. This value overrides the stack's default // setting. To update the agent version, edit the instance configuration and // specify a new version. AWS OpsWorks then automatically installs that version @@ -3123,6 +3126,7 @@ type CreateInstanceInput struct { // An array of BlockDeviceMapping objects that specify the instance's block // devices. For more information, see Block Device Mapping (http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/block-device-mapping-concepts.html). + // Note that block device mappings are not supported for custom AMIs. BlockDeviceMappings []*BlockDeviceMapping `type:"list"` // Whether to create an Amazon EBS-optimized instance. @@ -3156,14 +3160,15 @@ type CreateInstanceInput struct { // // A supported Linux operating system: An Amazon Linux version, such as Amazon // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more // information on the supported operating systems, see AWS OpsWorks Operating // Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // // The default option is the current Amazon Linux version. If you set this // parameter to Custom, you must use the CreateInstance action's AmiId parameter - // to specify the custom AMI that you want to use. For more information on the - // supported operating systems, see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html)For + // to specify the custom AMI that you want to use. 
Block device mappings are + // not supported if the value is Custom. For more information on the supported + // operating systems, see Operating Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html)For // more information on how to use custom AMIs with AWS OpsWorks, see Using Custom // AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). Os *string `type:"string"` @@ -3240,6 +3245,7 @@ type CreateLayerInput struct { // A JSON-formatted string containing custom stack configuration and deployment // attributes to be installed on the layer's instances. For more information, // see Using Custom JSON (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-json-override.html). + // This feature is supported as of version 1.7.42 of the AWS CLI. CustomJson *string `type:"string"` // A LayerCustomRecipes object that specifies the layer custom recipes. @@ -3334,9 +3340,10 @@ type CreateStackInput struct { // available. Fixed version - Set this parameter to your preferred agent version. // To update the agent version, you must edit the stack configuration and specify // a new version. AWS OpsWorks then automatically installs that version on the - // stack's instances. The default setting is LATEST. To specify an agent version, - // you must use the complete version number, not the abbreviated number shown - // on the console. For a list of available agent version numbers, call DescribeAgentVersions. + // stack's instances. The default setting is the most recent release of the + // agent. To specify an agent version, you must use the complete version number, + // not the abbreviated number shown on the console. For a list of available + // agent version numbers, call DescribeAgentVersions. // // You can also specify an agent version when you create or update an instance, // which overrides the stack's default setting. 
@@ -3350,9 +3357,10 @@ type CreateStackInput struct { // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). ChefConfiguration *ChefConfiguration `type:"structure"` - // The configuration manager. When you clone a stack we recommend that you use - // the configuration manager to specify the Chef version: 0.9, 11.4, or 11.10. - // The default value is currently 11.4. + // The configuration manager. When you create a stack we recommend that you + // use the configuration manager to specify the Chef version: 12, 11.10, or + // 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for + // Linux stacks is currently 11.4. ConfigurationManager *StackConfigurationManager `type:"structure"` // Contains the information required to retrieve an app or cookbook from a repository. @@ -3388,7 +3396,7 @@ type CreateStackInput struct { // // A supported Linux operating system: An Amazon Linux version, such as Amazon // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify // the custom AMI you want to use when you create instances. For more information, // see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // The default option is the current Amazon Linux version. For more information @@ -3824,10 +3832,10 @@ type DeploymentCommand struct { // // The update_dependencies command takes two arguments: // - // upgrade_os_to - Specifies the desired Amazon Linux version for instances + // upgrade_os_to - Specifies the desired Amazon Linux version for instances // whose OS you want to upgrade, such as Amazon Linux 2014.09. You must also - // set the allow_reboot argument to true. 
allow_reboot - Specifies whether - // to allow AWS OpsWorks to reboot the instances if necessary, after installing + // set the allow_reboot argument to true. allow_reboot - Specifies whether to + // allow AWS OpsWorks to reboot the instances if necessary, after installing // the updates. This argument can be set to either true or false. The default // value is false. For example, to upgrade an instance to Amazon Linux 2014.09, // set Args to the following. @@ -3839,25 +3847,24 @@ type DeploymentCommand struct { // // For stacks, the following commands are available: // - // execute_recipes: Execute one or more recipes. To specify the recipes, - // set an Args parameter named recipes to the list of recipes to be executed. - // For example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}. - // install_dependencies: Install the stack's dependencies. update_custom_cookbooks: - // Update the stack's custom cookbooks. update_dependencies: Update the stack's + // execute_recipes: Execute one or more recipes. To specify the recipes, set + // an Args parameter named recipes to the list of recipes to be executed. For + // example, to execute phpapp::appsetup, set Args to {"recipes":["phpapp::appsetup"]}. + // install_dependencies: Install the stack's dependencies. update_custom_cookbooks: + // Update the stack's custom cookbooks. update_dependencies: Update the stack's // dependencies. The update_dependencies and install_dependencies commands // are supported only for Linux instances. You can run the commands successfully // on Windows instances, but they do nothing. For apps, the following commands // are available: // - // deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter + // deploy: Deploy an app. Ruby on Rails apps have an optional Args parameter // named migrate. Set Args to {"migrate":["true"]} to migrate the database. - // The default setting is {"migrate":["false"]}. 
rollback Roll the app back + // The default setting is {"migrate":["false"]}. rollback Roll the app back // to the previous version. When you update an app, AWS OpsWorks stores the // previous version, up to a maximum of five versions. You can use this command - // to roll an app back as many as four versions. start: Start the app's web - // or application server. stop: Stop the app's web or application server. - // restart: Restart the app's web or application server. undeploy: Undeploy - // the app. + // to roll an app back as many as four versions. start: Start the app's web + // or application server. stop: Stop the app's web or application server. restart: + // Restart the app's web or application server. undeploy: Undeploy the app. Name *string `type:"string" required:"true" enum:"DeploymentCommandName"` } @@ -6436,8 +6443,9 @@ type StackConfigurationManager struct { // The name. This parameter must be set to "Chef". Name *string `type:"string"` - // The Chef version. This parameter must be set to 0.9, 11.4, or 11.10. The - // default value is 11.4. + // The Chef version. This parameter must be set to 12, 11.10, or 11.4 for Linux + // stacks, and to 12.2 for Windows stacks. The default value for Linux stacks + // is 11.4. Version *string `type:"string"` } @@ -6836,7 +6844,7 @@ type UpdateInstanceInput struct { // The default AWS OpsWorks agent version. You have the following options: // - // INHERIT - Use the stack's default agent version setting. version_number + // INHERIT - Use the stack's default agent version setting. version_number // - Use the specified agent version. This value overrides the stack's default // setting. To update the agent version, you must edit the instance configuration // and specify a new version. 
AWS OpsWorks then automatically installs that @@ -6896,7 +6904,7 @@ type UpdateInstanceInput struct { // // A supported Linux operating system: An Amazon Linux version, such as Amazon // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. For more // information on the supported operating systems, see AWS OpsWorks Operating // Systems (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-os.html). // @@ -7128,9 +7136,10 @@ type UpdateStackInput struct { // a New Stack (http://docs.aws.amazon.com/opsworks/latest/userguide/workingstacks-creating.html). ChefConfiguration *ChefConfiguration `type:"structure"` - // The configuration manager. When you clone a stack, we recommend that you - // use the configuration manager to specify the Chef version: 0.9, 11.4, or - // 11.10. The default value is currently 11.4. + // The configuration manager. When you update a stack, we recommend that you + // use the configuration manager to specify the Chef version: 12, 11.10, or + // 11.4 for Linux stacks, or 12.2 for Windows stacks. The default value for + // Linux stacks is currently 11.4. ConfigurationManager *StackConfigurationManager `type:"structure"` // Contains the information required to retrieve an app or cookbook from a repository. @@ -7164,7 +7173,7 @@ type UpdateStackInput struct { // // A supported Linux operating system: An Amazon Linux version, such as Amazon // Linux 2015.03, Red Hat Enterprise Linux 7, Ubuntu 12.04 LTS, or Ubuntu 14.04 - // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify + // LTS. Microsoft Windows Server 2012 R2 Base. A custom AMI: Custom. You specify // the custom AMI you want to use when you create instances. 
For more information // on how to use custom AMIs with OpsWorks, see Using Custom AMIs (http://docs.aws.amazon.com/opsworks/latest/userguide/workinginstances-custom-ami.html). // The default option is the stack's current operating system. For more information @@ -7450,7 +7459,7 @@ type VolumeConfiguration struct { // The volume type: // - // standard - Magnetic io1 - Provisioned IOPS (SSD) gp2 - General Purpose + // standard - Magnetic io1 - Provisioned IOPS (SSD) gp2 - General Purpose // (SSD) VolumeType *string `type:"string"` } diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go index a20f63f0f..70ea0a0a5 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/service.go @@ -43,13 +43,11 @@ import ( // // When you call CreateStack, CloneStack, or UpdateStack we recommend you use // the ConfigurationManager parameter to specify the Chef version. The recommended -// value for Linux stacks, which is also the default value, is currently 11.10. -// Windows stacks use Chef 12.2. For more information, see Chef Versions (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). +// value for Linux stacks is currently 12 (the default is 11.4). Windows stacks +// use Chef 12.2. For more information, see Chef Versions (http://docs.aws.amazon.com/opsworks/latest/userguide/workingcookbook-chef11.html). // -// You can also specify Chef 11.4 or Chef 0.9 for your Linux stack. However, -// Chef 0.9 has been deprecated. We do not recommend using Chef 0.9 for new -// stacks, and we recommend migrating your existing Chef 0.9 stacks to Chef -// 11.10 as soon as possible. +// You can specify Chef 12, 11.10, or 11.4 for your Linux stack. We recommend +// migrating your existing Linux stacks to Chef 12 as soon as possible. 
//The service client's operations are safe to be used concurrently. // It is not safe to mutate any of the client's properties though. type OpsWorks struct { diff --git a/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go b/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go index ad5bf3570..8d07ac87e 100644 --- a/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go +++ b/vendor/github.com/aws/aws-sdk-go/service/opsworks/waiters.go @@ -118,6 +118,12 @@ func (c *OpsWorks) WaitUntilInstanceOnline(input *DescribeInstancesInput) error Argument: "Instances[].Status", Expected: "terminated", }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, }, } @@ -189,6 +195,12 @@ func (c *OpsWorks) WaitUntilInstanceStopped(input *DescribeInstancesInput) error Argument: "Instances[].Status", Expected: "start_failed", }, + { + State: "failure", + Matcher: "pathAny", + Argument: "Instances[].Status", + Expected: "stop_failed", + }, }, } diff --git a/vendor/github.com/codegangsta/cli/app.go b/vendor/github.com/codegangsta/cli/app.go index 0805fd6a7..68849207d 100644 --- a/vendor/github.com/codegangsta/cli/app.go +++ b/vendor/github.com/codegangsta/cli/app.go @@ -5,13 +5,14 @@ import ( "io" "io/ioutil" "os" + "path" "time" ) // App is the main structure of a cli application. It is recomended that // an app be created with the cli.NewApp() function type App struct { - // The name of the program. Defaults to os.Args[0] + // The name of the program. Defaults to path.Base(os.Args[0]) Name string // Full name of command for help, defaults to Name HelpName string @@ -70,8 +71,8 @@ func compileTime() time.Time { // Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action. 
func NewApp() *App { return &App{ - Name: os.Args[0], - HelpName: os.Args[0], + Name: path.Base(os.Args[0]), + HelpName: path.Base(os.Args[0]), Usage: "A new cli application", Version: "0.0.0", BashComplete: DefaultAppComplete, @@ -163,6 +164,9 @@ func (a *App) Run(arguments []string) (err error) { if a.Before != nil { err := a.Before(context) if err != nil { + fmt.Fprintln(a.Writer, err) + fmt.Fprintln(a.Writer) + ShowAppHelp(context) return err } } diff --git a/vendor/github.com/codegangsta/cli/command.go b/vendor/github.com/codegangsta/cli/command.go index 824e77bae..e42178ee7 100644 --- a/vendor/github.com/codegangsta/cli/command.go +++ b/vendor/github.com/codegangsta/cli/command.go @@ -54,8 +54,8 @@ func (c Command) FullName() string { } // Invokes the command given the context, parses ctx.Args() to generate command-specific flags -func (c Command) Run(ctx *Context) error { - if len(c.Subcommands) > 0 || c.Before != nil || c.After != nil { +func (c Command) Run(ctx *Context) (err error) { + if len(c.Subcommands) > 0 { return c.startApp(ctx) } @@ -74,7 +74,6 @@ func (c Command) Run(ctx *Context) error { set := flagSet(c.Name, c.Flags) set.SetOutput(ioutil.Discard) - var err error if !c.SkipFlagParsing { firstFlagIndex := -1 terminatorIndex := -1 @@ -133,6 +132,30 @@ func (c Command) Run(ctx *Context) error { if checkCommandHelp(context, c.Name) { return nil } + + if c.After != nil { + defer func() { + afterErr := c.After(context) + if afterErr != nil { + if err != nil { + err = NewMultiError(err, afterErr) + } else { + err = afterErr + } + } + }() + } + + if c.Before != nil { + err := c.Before(context) + if err != nil { + fmt.Fprintln(ctx.App.Writer, err) + fmt.Fprintln(ctx.App.Writer) + ShowCommandHelp(ctx, c.Name) + return err + } + } + context.Command = c c.Action(context) return nil diff --git a/vendor/github.com/codegangsta/cli/context.go b/vendor/github.com/codegangsta/cli/context.go index 
f541f41c3..0513d34f6 100644 --- a/vendor/github.com/codegangsta/cli/context.go +++ b/vendor/github.com/codegangsta/cli/context.go @@ -163,7 +163,7 @@ func (c *Context) GlobalIsSet(name string) bool { // Returns a slice of flag names used in this context. func (c *Context) FlagNames() (names []string) { for _, flag := range c.Command.Flags { - name := strings.Split(flag.getName(), ",")[0] + name := strings.Split(flag.GetName(), ",")[0] if name == "help" { continue } @@ -175,7 +175,7 @@ func (c *Context) FlagNames() (names []string) { // Returns a slice of global flag names used by the app. func (c *Context) GlobalFlagNames() (names []string) { for _, flag := range c.App.Flags { - name := strings.Split(flag.getName(), ",")[0] + name := strings.Split(flag.GetName(), ",")[0] if name == "help" || name == "version" { continue } @@ -360,7 +360,7 @@ func normalizeFlags(flags []Flag, set *flag.FlagSet) error { visited[f.Name] = true }) for _, f := range flags { - parts := strings.Split(f.getName(), ",") + parts := strings.Split(f.GetName(), ",") if len(parts) == 1 { continue } diff --git a/vendor/github.com/codegangsta/cli/flag.go b/vendor/github.com/codegangsta/cli/flag.go index 9b22d7f1f..49f30994e 100644 --- a/vendor/github.com/codegangsta/cli/flag.go +++ b/vendor/github.com/codegangsta/cli/flag.go @@ -35,7 +35,7 @@ type Flag interface { fmt.Stringer // Apply Flag settings to the given flag set Apply(*flag.FlagSet) - getName() string + GetName() string } func flagSet(name string, flags []Flag) *flag.FlagSet { @@ -95,7 +95,7 @@ func (f GenericFlag) Apply(set *flag.FlagSet) { }) } -func (f GenericFlag) getName() string { +func (f GenericFlag) GetName() string { return f.Name } @@ -159,7 +159,7 @@ func (f StringSliceFlag) Apply(set *flag.FlagSet) { }) } -func (f StringSliceFlag) getName() string { +func (f StringSliceFlag) GetName() string { return f.Name } @@ -231,7 +231,7 @@ func (f IntSliceFlag) Apply(set *flag.FlagSet) { 
}) } -func (f IntSliceFlag) getName() string { +func (f IntSliceFlag) GetName() string { return f.Name } @@ -273,7 +273,7 @@ func (f BoolFlag) Apply(set *flag.FlagSet) { }) } -func (f BoolFlag) getName() string { +func (f BoolFlag) GetName() string { return f.Name } @@ -316,7 +316,7 @@ func (f BoolTFlag) Apply(set *flag.FlagSet) { }) } -func (f BoolTFlag) getName() string { +func (f BoolTFlag) GetName() string { return f.Name } @@ -364,7 +364,7 @@ func (f StringFlag) Apply(set *flag.FlagSet) { }) } -func (f StringFlag) getName() string { +func (f StringFlag) GetName() string { return f.Name } @@ -407,7 +407,7 @@ func (f IntFlag) Apply(set *flag.FlagSet) { }) } -func (f IntFlag) getName() string { +func (f IntFlag) GetName() string { return f.Name } @@ -450,7 +450,7 @@ func (f DurationFlag) Apply(set *flag.FlagSet) { }) } -func (f DurationFlag) getName() string { +func (f DurationFlag) GetName() string { return f.Name } @@ -492,7 +492,7 @@ func (f Float64Flag) Apply(set *flag.FlagSet) { }) } -func (f Float64Flag) getName() string { +func (f Float64Flag) GetName() string { return f.Name } diff --git a/vendor/github.com/codegangsta/cli/help.go b/vendor/github.com/codegangsta/cli/help.go index a246f63ac..ecb67c28a 100644 --- a/vendor/github.com/codegangsta/cli/help.go +++ b/vendor/github.com/codegangsta/cli/help.go @@ -180,7 +180,9 @@ func printHelp(out io.Writer, templ string, data interface{}) { t := template.Must(template.New("help").Funcs(funcMap).Parse(templ)) err := t.Execute(w, data) if err != nil { - panic(err) + // If the writer is closed, t.Execute will fail, and there's nothing + // we can do to recover. We could send this to os.Stderr if we need. 
+ return } w.Flush() } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go index 400978d8a..86aa631b7 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/btrfs/btrfs.go @@ -266,6 +266,14 @@ func (d *Driver) Create(id, parent, mountLabel string) error { } } + // if we have a remapped root (user namespaces enabled), change the created snapshot + // dir ownership to match + if rootUID != 0 || rootGID != 0 { + if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { + return err + } + } + return label.Relabel(path.Join(subvolumes, id), mountLabel, false) } @@ -278,7 +286,10 @@ func (d *Driver) Remove(id string) error { if err := subvolDelete(d.subvolumesDir(), id); err != nil { return err } - return os.RemoveAll(dir) + if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { + return err + } + return nil } // Get the requested filesystem id. diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go index 649d0a4b7..d9ab839c2 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/driver.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/driver.go @@ -22,8 +22,6 @@ const ( ) var ( - // DefaultDriver if a storage driver is not specified. - DefaultDriver string // All registered drivers drivers map[string]InitFunc @@ -130,12 +128,10 @@ func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []id } // New creates the driver and initializes it at the specified root. 
-func New(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (driver Driver, err error) { - for _, name := range []string{os.Getenv("DOCKER_DRIVER"), DefaultDriver} { - if name != "" { - logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver - return GetDriver(name, root, options, uidMaps, gidMaps) - } +func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (driver Driver, err error) { + if name != "" { + logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver + return GetDriver(name, root, options, uidMaps, gidMaps) } // Guess for prior driver diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go index def4d439c..7d81a83ab 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/copy.go @@ -4,12 +4,12 @@ package overlay import ( "fmt" - "io" "os" "path/filepath" "syscall" "time" + "github.com/docker/docker/pkg/pools" "github.com/docker/docker/pkg/system" ) @@ -32,7 +32,7 @@ func copyRegular(srcPath, dstPath string, mode os.FileMode) error { } defer dstFile.Close() - _, err = io.Copy(dstFile, srcFile) + _, err = pools.Copy(dstFile, srcFile) return err } diff --git a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go index 67b919300..59131bba0 100644 --- a/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go +++ b/vendor/github.com/docker/docker/daemon/graphdriver/overlay/overlay.go @@ -146,7 +146,7 @@ func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (grap return nil, err } // Create the driver home dir - if err := idtools.MkdirAllAs(home, 
0755, rootUID, rootGID); err != nil && !os.IsExist(err) { + if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) { return nil, err } @@ -322,7 +322,10 @@ func (d *Driver) dir(id string) string { // Remove cleans the directories that are created for this id. func (d *Driver) Remove(id string) error { - return os.RemoveAll(d.dir(id)) + if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) { + return err + } + return nil } // Get creates and mounts the required file system for the given id and returns the mount path. diff --git a/vendor/github.com/docker/docker/pkg/mount/flags.go b/vendor/github.com/docker/docker/pkg/mount/flags.go index 4305d1556..d2fb1fb4d 100644 --- a/vendor/github.com/docker/docker/pkg/mount/flags.go +++ b/vendor/github.com/docker/docker/pkg/mount/flags.go @@ -73,6 +73,7 @@ func parseOptions(options string) (int, string) { func ParseTmpfsOptions(options string) (int, string, error) { flags, data := parseOptions(options) validFlags := map[string]bool{ + "": true, "size": true, "mode": true, "uid": true, diff --git a/vendor/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/docker/docker/pkg/parsers/parsers.go index aa80f44d2..acc897168 100644 --- a/vendor/github.com/docker/docker/pkg/parsers/parsers.go +++ b/vendor/github.com/docker/docker/pkg/parsers/parsers.go @@ -9,29 +9,6 @@ import ( "strings" ) -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - // ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) func ParseKeyValueOpt(opt string) (string, string, error) { parts := strings.SplitN(opt, "=", 2) @@ -41,32 +18,6 @@ func ParseKeyValueOpt(opt string) (string, string, error) { return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil } -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} - // ParseUintList parses and validates the specified string as the value // found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be // one of the formats below. 
Note that duplicates are actually allowed in the diff --git a/vendor/github.com/docker/docker/pkg/plugins/client.go b/vendor/github.com/docker/docker/pkg/plugins/client.go index e9e31a89d..934a829ee 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/client.go +++ b/vendor/github.com/docker/docker/pkg/plugins/client.go @@ -11,8 +11,8 @@ import ( "time" "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/sockets" - "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/sockets" + "github.com/docker/go-connections/tlsconfig" ) const ( @@ -134,11 +134,10 @@ func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) Err string } remoteErr := responseErr{} - if err := json.Unmarshal(b, &remoteErr); err != nil { - return nil, fmt.Errorf("%s: %s", serviceMethod, err) - } - if remoteErr.Err != "" { - return nil, fmt.Errorf("%s: %s", serviceMethod, remoteErr.Err) + if err := json.Unmarshal(b, &remoteErr); err == nil { + if remoteErr.Err != "" { + return nil, fmt.Errorf("%s: %s", serviceMethod, remoteErr.Err) + } } // old way... 
return nil, fmt.Errorf("%s: %s", serviceMethod, string(b)) diff --git a/vendor/github.com/docker/docker/pkg/plugins/discovery.go b/vendor/github.com/docker/docker/pkg/plugins/discovery.go index d0ee27485..3f7966178 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/discovery.go +++ b/vendor/github.com/docker/docker/pkg/plugins/discovery.go @@ -13,7 +13,7 @@ import ( var ( // ErrNotFound plugin not found - ErrNotFound = errors.New("Plugin not found") + ErrNotFound = errors.New("plugin not found") socketsPath = "/run/docker/plugins" specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} ) @@ -25,6 +25,38 @@ func newLocalRegistry() localRegistry { return localRegistry{} } +// Scan scans all the plugin paths and returns all the names it found +func Scan() ([]string, error) { + var names []string + if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { + if err != nil { + return nil + } + + if fi.Mode()&os.ModeSocket != 0 { + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + } + return nil + }); err != nil { + return nil, err + } + + for _, path := range specsPaths { + if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { + if err != nil || fi.IsDir() { + return nil + } + name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) + names = append(names, name) + return nil + }); err != nil { + return nil, err + } + } + return names, nil +} + // Plugin returns the plugin registered with the given name (or returns an error). 
func (l *localRegistry) Plugin(name string) (*Plugin, error) { socketpaths := pluginPaths(socketsPath, name, ".sock") diff --git a/vendor/github.com/docker/docker/pkg/plugins/plugins.go b/vendor/github.com/docker/docker/pkg/plugins/plugins.go index f9e22d651..7157107ba 100644 --- a/vendor/github.com/docker/docker/pkg/plugins/plugins.go +++ b/vendor/github.com/docker/docker/pkg/plugins/plugins.go @@ -28,7 +28,7 @@ import ( "time" "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/tlsconfig" + "github.com/docker/go-connections/tlsconfig" ) var ( @@ -96,7 +96,6 @@ func (p *Plugin) activateWithLock() error { return err } - logrus.Debugf("%s's manifest: %v", p.Name, m) p.Manifest = m for _, iface := range m.Implements { @@ -109,6 +108,15 @@ func (p *Plugin) activateWithLock() error { return nil } +func (p *Plugin) implements(kind string) bool { + for _, driver := range p.Manifest.Implements { + if driver == kind { + return true + } + } + return false +} + func load(name string) (*Plugin, error) { return loadWithRetry(name, true) } @@ -167,11 +175,9 @@ func Get(name, imp string) (*Plugin, error) { if err != nil { return nil, err } - for _, driver := range pl.Manifest.Implements { - logrus.Debugf("%s implements: %s", name, driver) - if driver == imp { - return pl, nil - } + if pl.implements(imp) { + logrus.Debugf("%s implements: %s", name, imp) + return pl, nil } return nil, ErrNotImplements } @@ -180,3 +186,37 @@ func Get(name, imp string) (*Plugin, error) { func Handle(iface string, fn func(string, *Client)) { extpointHandlers[iface] = fn } + +// GetAll returns all the plugins for the specified implementation +func GetAll(imp string) ([]*Plugin, error) { + pluginNames, err := Scan() + if err != nil { + return nil, err + } + + type plLoad struct { + pl *Plugin + err error + } + + chPl := make(chan plLoad, len(pluginNames)) + for _, name := range pluginNames { + go func(name string) { + pl, err := 
loadWithRetry(name, false) + chPl <- plLoad{pl, err} + }(name) + } + + var out []*Plugin + for i := 0; i < len(pluginNames); i++ { + pl := <-chPl + if pl.err != nil { + logrus.Error(err) + continue + } + if pl.pl.implements(imp) { + out = append(out, pl.pl) + } + } + return out, nil +} diff --git a/vendor/github.com/docker/docker/pkg/sockets/README.md b/vendor/github.com/docker/docker/pkg/sockets/README.md deleted file mode 100644 index e69de29bb..000000000 diff --git a/vendor/github.com/docker/docker/pkg/sockets/tcp_socket.go b/vendor/github.com/docker/docker/pkg/sockets/tcp_socket.go deleted file mode 100644 index 6665a3bde..000000000 --- a/vendor/github.com/docker/docker/pkg/sockets/tcp_socket.go +++ /dev/null @@ -1,44 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP -// sockets. -package sockets - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -// NewTCPSocket creates a TCP socket listener with the specified address and -// and the specified tls configuration. If TLSConfig is set, will encapsulate the -// TCP listener inside a TLS one. -func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - if tlsConfig != nil { - tlsConfig.NextProtos = []string{"http/1.1"} - l = tls.NewListener(l, tlsConfig) - } - return l, nil -} - -// ConfigureTCPTransport configures the specified Transport according to the -// specified proto and addr. -// If the proto is unix (using a unix socket to communicate) the compression -// is disabled. -func ConfigureTCPTransport(tr *http.Transport, proto, addr string) { - // Why 32? See https://github.com/docker/docker/pull/8035. - timeout := 32 * time.Second - if proto == "unix" { - // No need for compression in local communications. 
- tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, timeout) - } - } else { - tr.Proxy = http.ProxyFromEnvironment - tr.Dial = (&net.Dialer{Timeout: timeout}).Dial - } -} diff --git a/vendor/github.com/docker/docker/pkg/sockets/unix_socket.go b/vendor/github.com/docker/docker/pkg/sockets/unix_socket.go deleted file mode 100644 index c10acedca..000000000 --- a/vendor/github.com/docker/docker/pkg/sockets/unix_socket.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build linux freebsd - -package sockets - -import ( - "fmt" - "net" - "os" - "strconv" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/user" -) - -// NewUnixSocket creates a unix socket with the specified path and group. -func NewUnixSocket(path, group string) (net.Listener, error) { - if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { - return nil, err - } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) - l, err := net.Listen("unix", path) - if err != nil { - return nil, err - } - if err := setSocketGroup(path, group); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err - } - return l, nil -} - -func setSocketGroup(path, group string) error { - if group == "" { - return nil - } - if err := changeGroup(path, group); err != nil { - if group != "docker" { - return err - } - logrus.Debugf("Warning: could not change group %s to docker: %v", path, err) - } - return nil -} - -func changeGroup(path string, nameOrGid string) error { - gid, err := lookupGidByName(nameOrGid) - if err != nil { - return err - } - logrus.Debugf("%s group found. 
gid: %d", nameOrGid, gid) - return os.Chown(path, 0, gid) -} - -func lookupGidByName(nameOrGid string) (int, error) { - groupFile, err := user.GetGroupPath() - if err != nil { - return -1, err - } - groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { - return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid - }) - if err != nil { - return -1, err - } - if groups != nil && len(groups) > 0 { - return groups[0].Gid, nil - } - gid, err := strconv.Atoi(nameOrGid) - if err == nil { - logrus.Warnf("Could not find GID %d", gid) - return gid, nil - } - return -1, fmt.Errorf("Group %s not found", nameOrGid) -} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 000000000..b01d08acf --- /dev/null +++ b/vendor/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,17 @@ +// +build solaris + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go index 381ea8211..c6075d4ff 100644 --- a/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go +++ b/vendor/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows,!freebsd +// +build !linux,!windows,!freebsd,!solaris package system diff --git a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go b/vendor/github.com/docker/docker/pkg/tlsconfig/config.go deleted file mode 100644 index e3dfad1f0..000000000 --- a/vendor/github.com/docker/docker/pkg/tlsconfig/config.go +++ /dev/null @@ 
-1,133 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. - CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, -} - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} - -// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) 
- -// ServerDefault is a secure-enough TLS configuration for the server TLS configuration. -var ServerDefault = tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, -} - -// ClientDefault is a secure-enough TLS configuration for the client TLS configuration. -var ClientDefault = tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - certPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - s := certPool.Subjects() - subjects := make([]string, len(s)) - for i, subject := range s { - subjects[i] = string(subject) - } - logrus.Debugf("Trusting certs with subjects: %v", subjects) - return certPool, nil -} - -// Client returns a TLS configuration meant to be used by a client. -func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - if options.CertFile != "" && options.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - return nil, fmt.Errorf("Could not load X509 key pair: %v. 
Make sure the key is not encrypted", err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - } - - return &tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. -func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - return &tlsConfig, nil -} diff --git a/vendor/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 000000000..9ea86d784 --- /dev/null +++ b/vendor/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. + +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). 
+ +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. + +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) 
+ +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/docker/go-units/LICENSE b/vendor/github.com/docker/go-units/LICENSE.code similarity index 100% rename from vendor/github.com/docker/go-units/LICENSE rename to vendor/github.com/docker/go-units/LICENSE.code diff --git a/vendor/github.com/docker/go-units/LICENSE.docs b/vendor/github.com/docker/go-units/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/docker/go-units/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. 
+ + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. 
More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. 
Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. 
Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. + Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. 
Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. 
You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. 
You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. 
However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. 
No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 000000000..477be8b21 --- /dev/null +++ b/vendor/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. 
+# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. + + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/vendor/github.com/docker/go-units/README.md b/vendor/github.com/docker/go-units/README.md index e2fb4051f..3ce4d79da 100644 --- a/vendor/github.com/docker/go-units/README.md +++ b/vendor/github.com/docker/go-units/README.md @@ -8,6 +8,11 @@ go-units is a library to transform human friendly measurements into machine frie See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. -## License +## Copyright and license -go-units is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. +Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code +is released under the Apache 2.0 license. The README.md file, and files in the +"docs" folder are licensed under the Creative Commons Attribution 4.0 +International License under the terms and conditions set forth in the file +"LICENSE.docs". You may obtain a duplicate copy of the same license, titled +CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. 
diff --git a/vendor/github.com/docker/go-units/ulimit.go b/vendor/github.com/docker/go-units/ulimit.go index f0a7be292..5ac7fd825 100644 --- a/vendor/github.com/docker/go-units/ulimit.go +++ b/vendor/github.com/docker/go-units/ulimit.go @@ -73,25 +73,34 @@ func ParseUlimit(val string) (*Ulimit, error) { return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) } - limitVals := strings.SplitN(parts[1], ":", 2) - if len(limitVals) > 2 { + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) } - soft, err := strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) } - hard := soft // in case no hard was set - if len(limitVals) == 2 { - hard, err = strconv.ParseInt(limitVals[1], 10, 64) - } - if soft > hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil } // GetRlimit returns the RLimit corresponding to Ulimit. 
diff --git a/vendor/github.com/fsouza/go-dockerclient/AUTHORS b/vendor/github.com/fsouza/go-dockerclient/AUTHORS index 2d4f98bb7..c6f775a10 100644 --- a/vendor/github.com/fsouza/go-dockerclient/AUTHORS +++ b/vendor/github.com/fsouza/go-dockerclient/AUTHORS @@ -12,6 +12,7 @@ Antonio Murdaca Artem Sidorenko Ben Marini Ben McCann +Benno van den Berg Brendan Fosberry Brian Lalor Brian P. Hamachek @@ -25,6 +26,7 @@ Cheah Chu Yeow cheneydeng Chris Bednarski CMGS +Colin Hebert Craig Jellick Dan Williams Daniel, Dao Quang Minh @@ -35,6 +37,7 @@ Dave Choi David Huie Dawn Chen Dinesh Subhraveti +Drew Wells Ed Elias G. Schneevoigt Erez Horev @@ -49,6 +52,7 @@ Guillermo Álvarez Fernández He Simei Ivan Mikushin James Bardin +James Nugent Jari Kolehmainen Jason Wilder Jawher Moussa diff --git a/vendor/github.com/fsouza/go-dockerclient/container.go b/vendor/github.com/fsouza/go-dockerclient/container.go index 4f9e9eb52..0baf01f97 100644 --- a/vendor/github.com/fsouza/go-dockerclient/container.go +++ b/vendor/github.com/fsouza/go-dockerclient/container.go @@ -976,7 +976,7 @@ type CommitContainerOptions struct { Container string Repository string `qs:"repo"` Tag string - Message string `qs:"m"` + Message string `qs:"comment"` Author string Run *Config `qs:"-"` } diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md index 53d27d449..55d3a8d5f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/README.md @@ -220,6 +220,7 @@ Note: Syslog hook also support connecting to local syslog (Ex. 
"/dev/log" or "/v | [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | | [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | | [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | +| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | #### Level logging @@ -300,12 +301,13 @@ The built-in logging formatters are: * `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. ```go - logrus.SetFormatter(&logstash.LogstashFormatter{Type: “application_name"}) + logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"}) ``` Third party logging formatters: -* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. +* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -354,5 +356,10 @@ Log rotation is not provided with Logrus. Log rotation should be done by an external program (like `logrotate(8)`) that can compress and delete old log entries. It should not be a feature of the application-level logger. 
+#### Tools + +| Tool | Description | +| ---- | ----------- | +|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| [godoc]: https://godoc.org/github.com/Sirupsen/logrus diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go index 699ea035c..9ae900bc5 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/entry.go @@ -8,6 +8,9 @@ import ( "time" ) +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + // An entry is the final or intermediate Logrus logging entry. It contains all // the fields passed with WithField{,s}. It's finally logged when Debug, Info, // Warn, Error, Fatal or Panic is called on it. These objects can be reused and @@ -53,6 +56,11 @@ func (entry *Entry) String() (string, error) { return reader.String(), err } +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + // Add a single field to the Entry. 
func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) @@ -70,12 +78,14 @@ func (entry *Entry) WithFields(fields Fields) *Entry { return &Entry{Logger: entry.Logger, Data: data} } -func (entry *Entry) log(level Level, msg string) { +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { entry.Time = time.Now() entry.Level = level entry.Message = msg - if err := entry.Logger.Hooks.Fire(level, entry); err != nil { + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { entry.Logger.mu.Lock() fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) entry.Logger.mu.Unlock() @@ -100,7 +110,7 @@ func (entry *Entry) log(level Level, msg string) { // panic() to use in Entry#Panic(), we avoid the allocation by checking // directly here. if level <= PanicLevel { - panic(entry) + panic(&entry) } } diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go index a67e1b802..9a0120ac1 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/exported.go @@ -48,6 +48,11 @@ func AddHook(hook Hook) { std.Hooks.Add(hook) } +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. 
// diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go index e4974bfbe..2fdb23176 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logger.go @@ -8,7 +8,7 @@ import ( type Logger struct { // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it default which is `os.Stdout`. You can also set this to + // file, or leave it default which is `os.Stderr`. You can also set this to // something more adventorous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging @@ -53,7 +53,7 @@ func New() *Logger { // Adds a field to the log entry, note that you it doesn't log until you call // Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// Ff you want multiple fields, use `WithFields`. +// If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { return NewEntry(logger).WithField(key, value) } @@ -64,6 +64,12 @@ func (logger *Logger) WithFields(fields Fields) *Entry { return NewEntry(logger).WithFields(fields) } +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + return NewEntry(logger).WithError(err) +} + func (logger *Logger) Debugf(format string, args ...interface{}) { if logger.Level >= DebugLevel { NewEntry(logger).Debugf(format, args...) 
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go index 43ee12e90..0c09fbc26 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/logrus.go @@ -74,7 +74,11 @@ const ( ) // Won't compile if StdLogger can't be realized by a log.Logger -var _ StdLogger = &log.Logger{} +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) // StdLogger is what your logrus-enabled library should take, that way // it'll accept a stdlib logger and a logrus logger. There's no standard diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go index 4bb537602..b343b3a37 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -12,9 +12,9 @@ import ( "unsafe" ) -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns true if stderr's file descriptor is a terminal. 
func IsTerminal() bool { - fd := syscall.Stdout + fd := syscall.Stderr var termios Termios _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) return err == 0 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 000000000..743df457f --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,15 @@ +// +build solaris + +package logrus + +import ( + "os" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go index 2e09f6f7e..0146845d1 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus/terminal_windows.go @@ -18,9 +18,9 @@ var ( procGetConsoleMode = kernel32.NewProc("GetConsoleMode") ) -// IsTerminal returns true if the given file descriptor is a terminal. +// IsTerminal returns true if stderr's file descriptor is a terminal. 
func IsTerminal() bool { - fd := syscall.Stdout + fd := syscall.Stderr var st uint32 r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) return r != 0 && e == 0 diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go new file mode 100644 index 000000000..d1b698541 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/hosts.go @@ -0,0 +1,146 @@ +package opts + +import ( + "fmt" + "net" + "net/url" + "runtime" + "strconv" + "strings" +) + +var ( + // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// + // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter + // is not supplied. A better longer term solution would be to use a named + // pipe as the default on the Windows daemon. + // These are the IANA registered port numbers for use with Docker + // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker + DefaultHTTPPort = 2375 // Default HTTP Port + // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled + DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port + // DefaultUnixSocket Path for the unix socket. 
+ // Docker daemon by default always listens on the default unix socket + DefaultUnixSocket = "/var/run/docker.sock" + // DefaultTCPHost constant defines the default host string used by docker on Windows + DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) + // DefaultTLSHost constant defines the default host string used by docker for TLS sockets + DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) +) + +// ValidateHost validates that the specified string is a valid host and returns it. +func ValidateHost(val string) (string, error) { + _, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) + if err != nil { + return val, err + } + // Note: unlike most flag validators, we don't return the mutated value here + // we need to know what the user entered later (using ParseHost) to adjust for tls + return val, nil +} + +// ParseHost and set defaults for a Daemon host string +func ParseHost(defaultHost, val string) (string, error) { + host, err := parseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) + if err != nil { + return val, err + } + return host, nil +} + +// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. 
+// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr +// defaultUnixAddr must be a absolute file path (no `unix://` prefix) +// defaultTCPAddr must be the full `tcp://host:port` form +func parseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) { + addr = strings.TrimSpace(addr) + if addr == "" { + if defaultAddr == defaultTLSHost { + return defaultTLSHost, nil + } + if runtime.GOOS != "windows" { + return fmt.Sprintf("unix://%s", defaultUnixAddr), nil + } + return defaultTCPAddr, nil + } + addrParts := strings.Split(addr, "://") + if len(addrParts) == 1 { + addrParts = []string{"tcp", addrParts[0]} + } + + switch addrParts[0] { + case "tcp": + return parseTCPAddr(addrParts[1], defaultTCPAddr) + case "unix": + return parseUnixAddr(addrParts[1], defaultUnixAddr) + case "fd": + return addr, nil + default: + return "", fmt.Errorf("Invalid bind address format: %s", addr) + } +} + +// parseUnixAddr parses and validates that the specified address is a valid UNIX +// socket address. It returns a formatted UNIX socket address, either using the +// address parsed from addr, or the contents of defaultAddr if addr is a blank +// string. +func parseUnixAddr(addr string, defaultAddr string) (string, error) { + addr = strings.TrimPrefix(addr, "unix://") + if strings.Contains(addr, "://") { + return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) + } + if addr == "" { + addr = defaultAddr + } + return fmt.Sprintf("unix://%s", addr), nil +} + +// parseTCPAddr parses and validates that the specified address is a valid TCP +// address. It returns a formatted TCP address, either using the address parsed +// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. 
+// tryAddr is expected to have already been Trim()'d +// defaultAddr must be in the full `tcp://host:port` form +func parseTCPAddr(tryAddr string, defaultAddr string) (string, error) { + if tryAddr == "" || tryAddr == "tcp://" { + return defaultAddr, nil + } + addr := strings.TrimPrefix(tryAddr, "tcp://") + if strings.Contains(addr, "://") || addr == "" { + return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) + } + + defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") + defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) + if err != nil { + return "", err + } + // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but + // not 1.4. See https://github.com/golang/go/issues/12200 and + // https://github.com/golang/go/issues/6530. + if strings.HasSuffix(addr, "]:") { + addr += defaultPort + } + + u, err := url.Parse("tcp://" + addr) + if err != nil { + return "", err + } + + host, port, err := net.SplitHostPort(u.Host) + if err != nil { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + if host == "" { + host = defaultHost + } + if port == "" { + port = defaultPort + } + p, err := strconv.Atoi(port) + if err != nil && p == 0 { + return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) + } + + return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go index d787b56ca..c7b0dc994 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ip.go @@ -22,7 +22,7 @@ func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { } // Set sets an IPv4 or IPv6 address from a given string. 
If the given -// string is not parsable as an IP address it returns an error. +// string is not parseable as an IP address it returns an error. func (o *IPOpt) Set(val string) error { ip := net.ParseIP(val) if ip == nil { diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go index df85a09e3..b244f5a3a 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts.go @@ -4,35 +4,13 @@ import ( "fmt" "net" "os" - "path" "regexp" "strings" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers" ) var ( alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) - // DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 - DefaultHTTPHost = "localhost" - - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// - // TODO Windows. DefaultHTTPPort is only used on Windows if a -H parameter - // is not supplied. A better longer term solution would be to use a named - // pipe as the default on the Windows daemon. - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. 
- // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) ) // ListOpts holds a list of values and a validation function. @@ -98,6 +76,16 @@ func (opts *ListOpts) GetAll() []string { return (*opts.values) } +// GetAllOrEmpty returns the values of the slice +// or an empty slice when there are no values. +func (opts *ListOpts) GetAllOrEmpty() []string { + v := *opts.values + if v == nil { + return make([]string, 0) + } + return v +} + // Get checks the existence of the specified key. func (opts *ListOpts) Get(key string) bool { for _, k := range *opts.values { @@ -175,82 +163,6 @@ func ValidateAttach(val string) (string, error) { return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") } -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - if _, _, err := parsers.ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidDeviceMode checks if the mode for device is valid or not. -// Valid mode is a composition of r (read), w (write), and m (mknod). -func ValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// ValidateDevice validates a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -// It also validates the device mode. 
-func ValidateDevice(val string) (string, error) { - return validatePath(val, ValidDeviceMode) -} - -func validatePath(val string, validator func(string) bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for path: %s", val) - } - - split := strings.SplitN(val, ":", 3) - if split[0] == "" { - return val, fmt.Errorf("bad format for path: %s", val) - } - switch len(split) { - case 1: - containerPath = split[0] - val = path.Clean(containerPath) - case 2: - if isValid := validator(split[1]); isValid { - containerPath = split[0] - mode = split[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = split[1] - val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) - } - case 3: - containerPath = split[1] - mode = split[2] - if isValid := validator(split[2]); !isValid { - return val, fmt.Errorf("bad mode specified: %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) - } - - if !path.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - // ValidateEnv validates an environment variable and returns it. // If no value is specified, it returns the current value using os.Getenv. // @@ -329,26 +241,6 @@ func ValidateLabel(val string) (string, error) { return val, nil } -// ValidateHost validates that the specified string is a valid host and returns it. 
-func ValidateHost(val string) (string, error) { - _, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, "", val) - if err != nil { - return val, err - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for tls - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultHost, val string) (string, error) { - host, err := parsers.ParseDockerDaemonHost(DefaultTCPHost, DefaultTLSHost, DefaultUnixSocket, defaultHost, val) - if err != nil { - return val, err - } - return host, nil -} - func doesEnvExist(name string) bool { for _, entry := range os.Environ() { parts := strings.SplitN(entry, "=", 2) diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go new file mode 100644 index 000000000..f1ce844a8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_unix.go @@ -0,0 +1,6 @@ +// +build !windows + +package opts + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 +const DefaultHTTPHost = "localhost" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go new file mode 100644 index 000000000..2a9e2be74 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/opts_windows.go @@ -0,0 +1,56 @@ +package opts + +// TODO Windows. Identify bug in GOLang 1.5.1 and/or Windows Server 2016 TP4. +// @jhowardmsft, @swernli. 
+// +// On Windows, this mitigates a problem with the default options of running +// a docker client against a local docker daemon on TP4. +// +// What was found that if the default host is "localhost", even if the client +// (and daemon as this is local) is not physically on a network, and the DNS +// cache is flushed (ipconfig /flushdns), then the client will pause for +// exactly one second when connecting to the daemon for calls. For example +// using docker run windowsservercore cmd, the CLI will send a create followed +// by an attach. You see the delay between the attach finishing and the attach +// being seen by the daemon. +// +// Here's some daemon debug logs with additional debug spew put in. The +// AfterWriteJSON log is the very last thing the daemon does as part of the +// create call. The POST /attach is the second CLI call. Notice the second +// time gap. +// +// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" +// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" +// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." +// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... +// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." +// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." +// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" +// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" +// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" +// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" +// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" +// ... 1 second gap here.... 
+// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" +// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" +// +// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change +// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows TP4. In theory, +// the Windows networking stack is supposed to resolve "localhost" internally, +// without hitting DNS, or even reading the hosts file (which is why localhost +// is commented out in the hosts file on Windows). +// +// We have validated that working around this using the actual IPv4 localhost +// address does not cause the delay. +// +// This does not occur with the docker client built with 1.4.3 on the same +// Windows TP4 build, regardless of whether the daemon is built using 1.5.1 +// or 1.4.3. It does not occur on Linux. We also verified we see the same thing +// on a cross-compiled Windows binary (from Linux). +// +// Final note: This is a mitigation, not a 'real' fix. It is still susceptible +// to the delay in TP4 if a user were to do 'docker run -H=tcp://localhost:2375...' +// explicitly. + +// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. 
docker daemon -H tcp://:8080 +const DefaultHTTPHost = "127.0.0.1" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go deleted file mode 100644 index 7cd480791..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts/ulimit.go +++ /dev/null @@ -1,52 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*ulimit.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*ulimit.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*ulimit.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := ulimit.Parse(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. 
-func (o *UlimitOpt) GetList() []*ulimit.Ulimit { - var ulimits []*ulimit.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go index fb3327f12..ce84347d3 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive.go @@ -31,7 +31,7 @@ type ( Archive io.ReadCloser // Reader is a type of io.Reader. Reader io.Reader - // Compression is the state represtents if compressed or not. + // Compression is the state represents if compressed or not. Compression int // TarChownOptions wraps the chown options UID and GID. TarChownOptions struct { @@ -77,6 +77,11 @@ var ( defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} ) +const ( + // HeaderSize is the size in bytes of a tar header + HeaderSize = 512 +) + const ( // Uncompressed represents the uncompressed. Uncompressed Compression = iota @@ -88,7 +93,8 @@ const ( Xz ) -// IsArchive checks if it is a archive by the header. +// IsArchive checks for the magic bytes of a tar or any supported compression +// algorithm. func IsArchive(header []byte) bool { compression := DetectCompression(header) if compression != Uncompressed { @@ -99,6 +105,23 @@ func IsArchive(header []byte) bool { return err == nil } +// IsArchivePath checks if the (possibly compressed) file at the given path +// starts with a tar file header. 
+func IsArchivePath(path string) bool { + file, err := os.Open(path) + if err != nil { + return false + } + defer file.Close() + rdr, err := DecompressStream(file) + if err != nil { + return false + } + r := tar.NewReader(rdr) + _, err = r.Next() + return err == nil +} + // DetectCompression detects the compression algorithm of the source. func DetectCompression(source []byte) Compression { for compression, m := range map[Compression][]byte{ @@ -128,7 +151,13 @@ func DecompressStream(archive io.Reader) (io.ReadCloser, error) { p := pools.BufioReader32KPool buf := p.Get(archive) bs, err := buf.Peek(10) - if err != nil { + if err != nil && err != io.EOF { + // Note: we'll ignore any io.EOF error because there are some odd + // cases where the layer.tar file will be empty (zero bytes) and + // that results in an io.EOF from the Peek() call. So, in those + // cases we'll just treat it as a non-compressed stream and + // that means just create an empty layer. + // See Issue 18170 return nil, err } @@ -275,8 +304,9 @@ func (ta *tarAppender) addTarFile(path, name string) error { } //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files - if ta.UIDMaps != nil || ta.GIDMaps != nil { + //writing tar headers/files. We skip whiteout files because they were written + //by the kernel and already have proper ownership relative to the host + if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { uid, gid, err := getFileUIDGID(fi.Sys()) if err != nil { return err @@ -407,19 +437,25 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L return err } + aTime := hdr.AccessTime + if aTime.Before(hdr.ModTime) { + // Last access time should never be before last modified time. 
+ aTime = hdr.ModTime + } + // system.Chtimes doesn't support a NOFOLLOW flag atm if hdr.Typeflag == tar.TypeLink { if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { + if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { return err } } else { - ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)} + ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { return err } @@ -794,10 +830,7 @@ func (archiver *Archiver) UntarPath(src, dst string) error { GIDMaps: archiver.GIDMaps, } } - if err := archiver.Untar(archive, dst, options); err != nil { - return err - } - return nil + return archiver.Untar(archive, dst, options) } // UntarPath is a convenience function which looks for an archive diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go index 07693e37c..86c688825 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_unix.go @@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string { } // getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a seperate function as this is platform specific. On Linux, we +// We use a separate function as this is platform specific. 
On Linux, we // can't use filepath.Join(srcPath,include) because this will clean away // a trailing "." or "/" which may be important. func getWalkRoot(srcPath string, include string) string { diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go index fbabc03a4..23d60aa41 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/archive_windows.go @@ -19,7 +19,7 @@ func fixVolumePathPrefix(srcPath string) string { } // getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a seperate function as this is platform specific. +// We use a separate function as this is platform specific. func getWalkRoot(srcPath string, include string) string { return filepath.Join(srcPath, include) } diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go index 12ec40163..a2a1dc36e 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/changes.go @@ -31,6 +31,18 @@ const ( ChangeDelete ) +func (c ChangeType) String() string { + switch c { + case ChangeModify: + return "C" + case ChangeAdd: + return "A" + case ChangeDelete: + return "D" + } + return "" +} + // Change represents a change, it wraps the change type and path. // It describes changes of the files in the path respect to the // parent layers. 
The change could be modify, add, delete. @@ -41,16 +53,7 @@ type Change struct { } func (change *Change) String() string { - var kind string - switch change.Kind { - case ChangeModify: - kind = "C" - case ChangeAdd: - kind = "A" - case ChangeDelete: - kind = "D" - } - return fmt.Sprintf("%s %s", kind, change.Path) + return fmt.Sprintf("%s %s", change.Kind, change.Path) } // for sort.Sort @@ -147,7 +150,7 @@ func Changes(layers []string, rw string) ([]Change, error) { // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directoriy in the rw and ro layers are all equal. + // modify time, mode and size of the parent directory in the rw and ro layers are all equal. // Check https://github.com/docker/docker/pull/13590 for details. if f.IsDir() { changedDirs[path] = struct{}{} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go index 251c9bd99..e95091264 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/copy.go @@ -135,30 +135,17 @@ type CopyInfo struct { // operation. The given path should be an absolute local path. A source path // has all symlinks evaluated that appear before the last path separator ("/" // on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string) (CopyInfo, error) { - // Split the given path into its Directory and Base components. We will - // evaluate symlinks in the directory component then append the base. 
+func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { + // normalize the file path and then evaluate the symbol link + // we will use the target file instead of the symbol link if + // followLink is set path = normalizePath(path) - dirPath, basePath := filepath.Split(path) - resolvedDirPath, err := filepath.EvalSymlinks(dirPath) + resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) if err != nil { return CopyInfo{}, err } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath := resolvedDirPath + string(filepath.Separator) + basePath - - var rebaseName string - if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - stat, err := os.Lstat(resolvedPath) if err != nil { return CopyInfo{}, err @@ -279,7 +266,10 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st // The destination exists as some type of file and the source content // is also a file. The source content entry will have to be renamed to // have a basename which matches the destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil case srcInfo.IsDir: // The destination does not exist and the source content is an archive // of a directory. 
The archive should be extracted to the parent of @@ -287,7 +277,10 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st // created as a result should take the name of the destination path. // The source content entries will have to be renamed to have a // basename which matches the destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil case assertsDirectory(dstInfo.Path): // The destination does not exist and is asserted to be created as a // directory, but the source content is not a directory. This is an @@ -301,14 +294,17 @@ func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir st // to be created when the archive is extracted and the source content // entry will have to be renamed to have a basename which matches the // destination path's basename. - return dstDir, rebaseArchiveEntries(srcContent, srcBase, dstBase), nil + if len(srcInfo.RebaseName) != 0 { + srcBase = srcInfo.RebaseName + } + return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil } } -// rebaseArchiveEntries rewrites the given srcContent archive replacing +// RebaseArchiveEntries rewrites the given srcContent archive replacing // an occurrence of oldBase with newBase at the beginning of entry names. -func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { +func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { if oldBase == string(os.PathSeparator) { // If oldBase specifies the root directory, use an empty string as // oldBase instead so that newBase doesn't replace the path separator @@ -355,7 +351,7 @@ func rebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { // CopyResource performs an archive copy from the given source path to the // given destination path. 
The source path MUST exist and the destination // path's parent directory must exist. -func CopyResource(srcPath, dstPath string) error { +func CopyResource(srcPath, dstPath string, followLink bool) error { var ( srcInfo CopyInfo err error @@ -369,7 +365,7 @@ func CopyResource(srcPath, dstPath string) error { srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - if srcInfo, err = CopyInfoSourcePath(srcPath); err != nil { + if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { return err } @@ -405,3 +401,58 @@ func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { return Untar(copyArchive, dstDir, options) } + +// ResolveHostSourcePath decides real path need to be copied with parameters such as +// whether to follow symbol link or not, if followLink is true, resolvedPath will return +// link target of any symbol link file, else it will only resolve symlink of directory +// but return symbol link file itself without resolving. +func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { + if followLink { + resolvedPath, err = filepath.EvalSymlinks(path) + if err != nil { + return + } + + resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) + } else { + dirPath, basePath := filepath.Split(path) + + // if not follow symbol link, then resolve symbol link of parent dir + var resolvedDirPath string + resolvedDirPath, err = filepath.EvalSymlinks(dirPath) + if err != nil { + return + } + // resolvedDirPath will have been cleaned (no trailing path separators) so + // we can manually join it with the base path element. 
+ resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath + if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { + rebaseName = filepath.Base(path) + } + } + return resolvedPath, rebaseName, nil +} + +// GetRebaseName normalizes and compares path and resolvedPath, +// return completed resolved path and rebased file name +func GetRebaseName(path, resolvedPath string) (string, string) { + // linkTarget will have been cleaned (no trailing path separators and dot) so + // we can manually join it with them + var rebaseName string + if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { + resolvedPath += string(filepath.Separator) + "." + } + + if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { + resolvedPath += string(filepath.Separator) + } + + if filepath.Base(path) != filepath.Base(resolvedPath) { + // In the case where the path had a trailing separator and a symlink + // evaluation has changed the last path component, we will need to + // rebase the name in the archive that is being copied to match the + // originally requested name. 
+ rebaseName = filepath.Base(path) + } + return resolvedPath, rebaseName +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go index 5ec71110a..887dd54cc 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/diff.go @@ -25,6 +25,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er defer pools.BufioReader32KPool.Put(trBuf) var dirs []*tar.Header + unpackedPaths := make(map[string]struct{}) if options == nil { options = &TarOptions{} @@ -134,14 +135,27 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er if strings.HasPrefix(base, WhiteoutPrefix) { dir := filepath.Dir(path) if base == WhiteoutOpaqueDir { - fi, err := os.Lstat(dir) - if err != nil && !os.IsNotExist(err) { - return 0, err - } - if err := os.RemoveAll(dir); err != nil { + _, err := os.Lstat(dir) + if err != nil { return 0, err } - if err := os.Mkdir(dir, fi.Mode()&os.ModePerm); err != nil { + err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error { + if err != nil { + if os.IsNotExist(err) { + err = nil // parent was deleted + } + return err + } + if path == dir { + return nil + } + if _, exists := unpackedPaths[path]; !exists { + err := os.RemoveAll(path) + return err + } + return nil + }) + if err != nil { return 0, err } } else { @@ -214,6 +228,7 @@ func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, er if hdr.Typeflag == tar.TypeDir { dirs = append(dirs, hdr) } + unpackedPaths[path] = struct{}{} } } diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go 
b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go index 3d9c31321..d20478a10 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive/whiteouts.go @@ -9,7 +9,7 @@ package archive const WhiteoutPrefix = ".wh." // WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for remoing an actaul file. Normally these files are excluded from exported +// for removing an actual file. Normally these files are excluded from exported // archives. const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go index 5559732a0..a15cf4bc5 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils/fileutils.go @@ -6,7 +6,9 @@ import ( "io" "os" "path/filepath" + "regexp" "strings" + "text/scanner" "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus" ) @@ -76,7 +78,7 @@ func Matches(file string, patterns []string) (bool, error) { // OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. // It will assume that the inputs have been preprocessed and therefore the function -// doen't need to do as much error checking and clean-up. This was done to avoid +// doesn't need to do as much error checking and clean-up. This was done to avoid // repeating these steps on each file being checked during the archive process. 
// The more generic fileutils.Matches() can't make these assumptions. func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { @@ -92,15 +94,15 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, pattern = pattern[1:] } - match, err := filepath.Match(pattern, file) + match, err := regexpMatch(pattern, file) if err != nil { - return false, err + return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) } if !match && parentPath != "." { // Check to see if the pattern matches one of our parent dirs. if len(patDirs[i]) <= len(parentPathDirs) { - match, _ = filepath.Match(strings.Join(patDirs[i], "/"), + match, _ = regexpMatch(strings.Join(patDirs[i], "/"), strings.Join(parentPathDirs[:len(patDirs[i])], "/")) } } @@ -117,6 +119,99 @@ func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, return matched, nil } +// regexpMatch tries to match the logic of filepath.Match but +// does so using regexp logic. We do this so that we can expand the +// wildcard set to include other things, like "**" to mean any number +// of directories. This means that we should be backwards compatible +// with filepath.Match(). We'll end up supporting more stuff, due to +// the fact that we're using regexp, but that's ok - it does no harm. +func regexpMatch(pattern, path string) (bool, error) { + regStr := "^" + + // Do some syntax checking on the pattern. + // filepath's Match() has some really weird rules that are inconsistent + // so instead of trying to dup their logic, just call Match() for its + // error state and if there is an error in the pattern return it. + // If this becomes an issue we can remove this since its really only + // needed in the error (syntax) case - which isn't really critical. + if _, err := filepath.Match(pattern, path); err != nil { + return false, err + } + + // Go through the pattern and convert it to a regexp. + // We use a scanner so we can support utf-8 chars. 
+ var scan scanner.Scanner + scan.Init(strings.NewReader(pattern)) + + sl := string(os.PathSeparator) + escSL := sl + if sl == `\` { + escSL += `\` + } + + for scan.Peek() != scanner.EOF { + ch := scan.Next() + + if ch == '*' { + if scan.Peek() == '*' { + // is some flavor of "**" + scan.Next() + + if scan.Peek() == scanner.EOF { + // is "**EOF" - to align with .gitignore just accept all + regStr += ".*" + } else { + // is "**" + regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" + } + + // Treat **/ as ** so eat the "/" + if string(scan.Peek()) == sl { + scan.Next() + } + } else { + // is "*" so map it to anything but "/" + regStr += "[^" + escSL + "]*" + } + } else if ch == '?' { + // "?" is any char except "/" + regStr += "[^" + escSL + "]" + } else if strings.Index(".$", string(ch)) != -1 { + // Escape some regexp special chars that have no meaning + // in golang's filepath.Match + regStr += `\` + string(ch) + } else if ch == '\\' { + // escape next char. Note that a trailing \ in the pattern + // will be left alone (but need to escape it) + if sl == `\` { + // On windows map "\" to "\\", meaning an escaped backslash, + // and then just continue because filepath.Match on + // Windows doesn't allow escaping at all + regStr += escSL + continue + } + if scan.Peek() != scanner.EOF { + regStr += `\` + string(scan.Next()) + } else { + regStr += `\` + } + } else { + regStr += string(ch) + } + } + + regStr += "$" + + res, err := regexp.MatchString(regStr, path) + + // Map regexp's error to filepath's so no one knows we're not using filepath + if err != nil { + err = filepath.ErrBadPattern + } + + return res, err +} + // CopyFile copies from src to dst until either EOF is reached // on src or an error occurs. It verifies src exists and remove // the dst if it exists. 
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go index 932e1d1bc..e263c284f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/bytespipe.go @@ -1,16 +1,32 @@ package ioutils +import ( + "errors" + "io" + "sync" +) + +// maxCap is the highest capacity to use in byte slices that buffer data. const maxCap = 1e6 -// BytesPipe is io.ReadWriter which works similarly to pipe(queue). -// All written data could be read only once. Also BytesPipe is allocating -// and releasing new byte slices to adjust to current needs, so there won't be -// overgrown buffer after high load peak. -// BytesPipe isn't goroutine-safe, caller must synchronize it if needed. +// blockThreshold is the minimum number of bytes in the buffer which will cause +// a write to BytesPipe to block when allocating a new slice. +const blockThreshold = 1e6 + +// ErrClosed is returned when Write is called on a closed BytesPipe. +var ErrClosed = errors.New("write to closed BytesPipe") + +// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). +// All written data may be read at most once. Also, BytesPipe allocates +// and releases new byte slices to adjust to current needs, so the buffer +// won't be overgrown after peak loads. type BytesPipe struct { + mu sync.Mutex + wait *sync.Cond buf [][]byte // slice of byte-slices of buffered data lastRead int // index in the first slice to a read point bufLen int // length of data buffered over the slices + closeErr error // error to return from next Read. set to nil if not closed. } // NewBytesPipe creates new BytesPipe, initialized by specified slice. 
@@ -20,15 +36,23 @@ func NewBytesPipe(buf []byte) *BytesPipe { if cap(buf) == 0 { buf = make([]byte, 0, 64) } - return &BytesPipe{ + bp := &BytesPipe{ buf: [][]byte{buf[:0]}, } + bp.wait = sync.NewCond(&bp.mu) + return bp } // Write writes p to BytesPipe. // It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (n int, err error) { +func (bp *BytesPipe) Write(p []byte) (int, error) { + bp.mu.Lock() + defer bp.mu.Unlock() + written := 0 for { + if bp.closeErr != nil { + return written, ErrClosed + } // write data to the last buffer b := bp.buf[len(bp.buf)-1] // copy data to the current empty allocated area @@ -38,6 +62,8 @@ func (bp *BytesPipe) Write(p []byte) (n int, err error) { // include written data in last buffer bp.buf[len(bp.buf)-1] = b[:len(b)+n] + written += n + // if there was enough room to write all then break if len(p) == n { break @@ -45,15 +71,40 @@ func (bp *BytesPipe) Write(p []byte) (n int, err error) { // more data: write to the next slice p = p[n:] + + // block if too much data is still in the buffer + for bp.bufLen >= blockThreshold { + bp.wait.Wait() + } + // allocate slice that has twice the size of the last unless maximum reached nextCap := 2 * cap(bp.buf[len(bp.buf)-1]) - if maxCap < nextCap { + if nextCap > maxCap { nextCap = maxCap } // add new byte slice to the buffers slice and continue writing bp.buf = append(bp.buf, make([]byte, 0, nextCap)) } - return + bp.wait.Broadcast() + return written, nil +} + +// CloseWithError causes further reads from a BytesPipe to return immediately. +func (bp *BytesPipe) CloseWithError(err error) error { + bp.mu.Lock() + if err != nil { + bp.closeErr = err + } else { + bp.closeErr = io.EOF + } + bp.wait.Broadcast() + bp.mu.Unlock() + return nil +} + +// Close causes further reads from a BytesPipe to return immediately. 
+func (bp *BytesPipe) Close() error { + return bp.CloseWithError(nil) } func (bp *BytesPipe) len() int { @@ -63,6 +114,17 @@ func (bp *BytesPipe) len() int { // Read reads bytes from BytesPipe. // Data could be read only once. func (bp *BytesPipe) Read(p []byte) (n int, err error) { + bp.mu.Lock() + defer bp.mu.Unlock() + if bp.len() == 0 { + if bp.closeErr != nil { + return 0, bp.closeErr + } + bp.wait.Wait() + if bp.len() == 0 && bp.closeErr != nil { + return 0, bp.closeErr + } + } for { read := copy(p, bp.buf[0][bp.lastRead:]) n += read @@ -85,5 +147,6 @@ func (bp *BytesPipe) Read(p []byte) (n int, err error) { bp.buf[0] = nil // throw away old slice bp.buf = bp.buf[1:] // switch to next } + bp.wait.Broadcast() return } diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go index f231aa9da..0d2d76b47 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/multireader.go @@ -53,7 +53,7 @@ func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { } if rdrOffset == s && i != len(r.readers)-1 { - idx += 1 + idx++ rdrOffset = 0 } r.pos = &pos{idx, rdrOffset} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go index 54dd312bb..a891955ac 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/readers.go @@ -4,7 +4,8 
@@ import ( "crypto/sha256" "encoding/hex" "io" - "sync" + + "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context" ) type readCloserWrapper struct { @@ -45,92 +46,6 @@ func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { } } -// bufReader allows the underlying reader to continue to produce -// output by pre-emptively reading from the wrapped reader. -// This is achieved by buffering this data in bufReader's -// expanding buffer. -type bufReader struct { - sync.Mutex - buf io.ReadWriter - reader io.Reader - err error - wait sync.Cond - drainBuf []byte -} - -// NewBufReader returns a new bufReader. -func NewBufReader(r io.Reader) io.ReadCloser { - reader := &bufReader{ - buf: NewBytesPipe(nil), - reader: r, - drainBuf: make([]byte, 1024), - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -// NewBufReaderWithDrainbufAndBuffer returns a BufReader with drainBuffer and buffer. -func NewBufReaderWithDrainbufAndBuffer(r io.Reader, drainBuffer []byte, buffer io.ReadWriter) io.ReadCloser { - reader := &bufReader{ - buf: buffer, - drainBuf: drainBuffer, - reader: r, - } - reader.wait.L = &reader.Mutex - go reader.drain() - return reader -} - -func (r *bufReader) drain() { - for { - //Call to scheduler is made to yield from this goroutine. - //This avoids goroutine looping here when n=0,err=nil, fixes code hangs when run with GCC Go. 
- callSchedulerIfNecessary() - n, err := r.reader.Read(r.drainBuf) - r.Lock() - if err != nil { - r.err = err - } else { - if n == 0 { - // nothing written, no need to signal - r.Unlock() - continue - } - r.buf.Write(r.drainBuf[:n]) - } - r.wait.Signal() - r.Unlock() - if err != nil { - break - } - } -} - -func (r *bufReader) Read(p []byte) (n int, err error) { - r.Lock() - defer r.Unlock() - for { - n, err = r.buf.Read(p) - if n > 0 { - return n, err - } - if r.err != nil { - return 0, r.err - } - r.wait.Wait() - } -} - -// Close closes the bufReader -func (r *bufReader) Close() error { - closer, ok := r.reader.(io.ReadCloser) - if !ok { - return nil - } - return closer.Close() -} - // HashData returns the sha256 sum of src. func HashData(src io.Reader) (string, error) { h := sha256.New() @@ -168,3 +83,72 @@ func (r *OnEOFReader) runFunc() { r.Fn = nil } } + +// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read +// operations. +type cancelReadCloser struct { + cancel func() + pR *io.PipeReader // Stream to read from + pW *io.PipeWriter +} + +// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the +// context is cancelled. The returned io.ReadCloser must be closed when it is +// no longer needed. +func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { + pR, pW := io.Pipe() + + // Create a context used to signal when the pipe is closed + doneCtx, cancel := context.WithCancel(context.Background()) + + p := &cancelReadCloser{ + cancel: cancel, + pR: pR, + pW: pW, + } + + go func() { + _, err := io.Copy(pW, in) + select { + case <-ctx.Done(): + // If the context was closed, p.closeWithError + // was already called. Calling it again would + // change the error that Read returns. 
+ default: + p.closeWithError(err) + } + in.Close() + }() + go func() { + for { + select { + case <-ctx.Done(): + p.closeWithError(ctx.Err()) + case <-doneCtx.Done(): + return + } + } + }() + + return p +} + +// Read wraps the Read method of the pipe that provides data from the wrapped +// ReadCloser. +func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { + return p.pR.Read(buf) +} + +// closeWithError closes the wrapper and its underlying reader. It will +// cause future calls to Read to return err. +func (p *cancelReadCloser) closeWithError(err error) { + p.pW.CloseWithError(err) + p.cancel() +} + +// Close closes the wrapper its underlying reader. It will cause +// future calls to Read to return io.EOF. +func (p *cancelReadCloser) Close() error { + p.closeWithError(io.EOF) + return nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go index 25095474d..2b35a2666 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writeflusher.go @@ -1,41 +1,86 @@ package ioutils import ( + "errors" "io" "net/http" "sync" ) +// WriteFlusher wraps the Write and Flush operation ensuring that every write +// is a flush. In addition, the Close method can be called to intercept +// Read/Write calls if the targets lifecycle has already ended. type WriteFlusher struct { - sync.Mutex + mu sync.Mutex w io.Writer flusher http.Flusher flushed bool + closed error + + // TODO(stevvooe): Use channel for closed instead, remove mutex. Using a + // channel will allow one to properly order the operations. 
} +var errWriteFlusherClosed = errors.New("writeflusher: closed") + func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - wf.Lock() - defer wf.Unlock() + wf.mu.Lock() + defer wf.mu.Unlock() + if wf.closed != nil { + return 0, wf.closed + } + n, err = wf.w.Write(b) - wf.flushed = true - wf.flusher.Flush() + wf.flush() // every write is a flush. return n, err } // Flush the stream immediately. func (wf *WriteFlusher) Flush() { - wf.Lock() - defer wf.Unlock() + wf.mu.Lock() + defer wf.mu.Unlock() + + wf.flush() +} + +// flush the stream immediately without taking a lock. Used internally. +func (wf *WriteFlusher) flush() { + if wf.closed != nil { + return + } + wf.flushed = true wf.flusher.Flush() } +// Flushed returns the state of flushed. +// If it's flushed, return true, or else it return false. func (wf *WriteFlusher) Flushed() bool { - wf.Lock() - defer wf.Unlock() + // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to + // be used to detect whether or a response code has been issued or not. + // Another hook should be used instead. + wf.mu.Lock() + defer wf.mu.Unlock() + return wf.flushed } +// Close closes the write flusher, disallowing any further writes to the +// target. After the flusher is closed, all calls to write or flush will +// result in an error. +func (wf *WriteFlusher) Close() error { + wf.mu.Lock() + defer wf.mu.Unlock() + + if wf.closed != nil { + return wf.closed + } + + wf.closed = errWriteFlusherClosed + return nil +} + +// NewWriteFlusher returns a new WriteFlusher. 
func NewWriteFlusher(w io.Writer) *WriteFlusher { var flusher http.Flusher if f, ok := w.(http.Flusher); ok { diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go index 43fdc44ea..ccc7f9c23 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils/writers.go @@ -2,6 +2,7 @@ package ioutils import "io" +// NopWriter represents a type which write operation is nop. type NopWriter struct{} func (*NopWriter) Write(buf []byte) (int, error) { @@ -14,12 +15,15 @@ type nopWriteCloser struct { func (w *nopWriteCloser) Close() error { return nil } +// NopWriteCloser returns a nopWriteCloser. func NopWriteCloser(w io.Writer) io.WriteCloser { return &nopWriteCloser{w} } +// NopFlusher represents a type which flush operation is nop. type NopFlusher struct{} +// Flush is a nop operation. func (f *NopFlusher) Flush() {} type writeCloserWrapper struct { @@ -31,6 +35,7 @@ func (r *writeCloserWrapper) Close() error { return r.closer() } +// NewWriteCloserWrapper returns a new io.WriteCloser. func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { return &writeCloserWrapper{ Writer: r, @@ -38,7 +43,7 @@ func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { } } -// Wrap a concrete io.Writer and hold a count of the number +// WriteCounter wraps a concrete io.Writer and hold a count of the number // of bytes written to the writer during a "session". // This can be convenient when write return is masked // (e.g., json.Encoder.Encode()) @@ -47,6 +52,7 @@ type WriteCounter struct { Writer io.Writer } +// NewWriteCounter returns a new WriteCounter. 
func NewWriteCounter(w io.Writer) *WriteCounter { return &WriteCounter{ Writer: w, diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go deleted file mode 100644 index a604a9e12..000000000 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers/parsers.go +++ /dev/null @@ -1,259 +0,0 @@ -// Package parsers provides helper functions to parse and validate different type -// of string. It can be hosts, unix addresses, tcp addresses, filters, kernel -// operating system versions. -package parsers - -import ( - "fmt" - "net" - "net/url" - "path" - "runtime" - "strconv" - "strings" -) - -// ParseDockerDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, will use the defaultTCPAddr or defaultUnixAddr -// defaultUnixAddr must be a absolute file path (no `unix://` prefix) -// defaultTCPAddr must be the full `tcp://host:port` form -func ParseDockerDaemonHost(defaultTCPAddr, defaultTLSHost, defaultUnixAddr, defaultAddr, addr string) (string, error) { - addr = strings.TrimSpace(addr) - if addr == "" { - if defaultAddr == defaultTLSHost { - return defaultTLSHost, nil - } - if runtime.GOOS != "windows" { - return fmt.Sprintf("unix://%s", defaultUnixAddr), nil - } - return defaultTCPAddr, nil - } - addrParts := strings.Split(addr, "://") - if len(addrParts) == 1 { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], defaultTCPAddr) - case "unix": - return ParseUnixAddr(addrParts[1], defaultUnixAddr) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// ParseUnixAddr parses and validates that the specified address is a valid 
UNIX -// socket address. It returns a formatted UNIX socket address, either using the -// address parsed from addr, or the contents of defaultAddr if addr is a blank -// string. -func ParseUnixAddr(addr string, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, "unix://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected unix: %s", addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("unix://%s", addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if net.ParseIP(host).To4() == nil && strings.Contains(host, ":") { - // This is either an ipv6 address - host = "[" + host + "]" - } - return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil -} - -// ParseRepositoryTag gets a 
repos name and returns the right reposName + tag|digest -// The tag can be confusing because of a port in a repository name. -// Ex: localhost.localdomain:5000/samalba/hipache:latest -// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb -func ParseRepositoryTag(repos string) (string, string) { - n := strings.Index(repos, "@") - if n >= 0 { - parts := strings.Split(repos, "@") - return parts[0], parts[1] - } - n = strings.LastIndex(repos, ":") - if n < 0 { - return repos, "" - } - if tag := repos[n+1:]; !strings.Contains(tag, "/") { - return repos[:n], tag - } - return repos, "" -} - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value) -func ParseKeyValueOpt(opt string) (string, string, error) { - parts := strings.SplitN(opt, "=", 2) - if len(parts) != 2 { - return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt) - } - return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get an HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ParseUintList 
parses and validates the specified string as the value -// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be -// one of the formats below. Note that duplicates are actually allowed in the -// input string. It returns a `map[int]bool` with available elements from `val` -// set to `true`. -// Supported formats: -// 7 -// 1-6 -// 0,3-4,7,8-10 -// 0-0,0,1-7 -// 03,1-3 <- this is gonna get parsed as [1,2,3] -// 3,2,1 -// 0-2,3,1 -func ParseUintList(val string) (map[int]bool, error) { - if val == "" { - return map[int]bool{}, nil - } - - availableInts := make(map[int]bool) - split := strings.Split(val, ",") - errInvalidFormat := fmt.Errorf("invalid format: %s", val) - - for _, r := range split { - if !strings.Contains(r, "-") { - v, err := strconv.Atoi(r) - if err != nil { - return nil, errInvalidFormat - } - availableInts[v] = true - } else { - split := strings.SplitN(r, "-", 2) - min, err := strconv.Atoi(split[0]) - if err != nil { - return nil, errInvalidFormat - } - max, err := strconv.Atoi(split[1]) - if err != nil { - return nil, errInvalidFormat - } - if max < min { - return nil, errInvalidFormat - } - for i := min; i <= max; i++ { - availableInts[i] = true - } - } - } - return availableInts, nil -} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go index 31ed9ff10..acf3f566f 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/chtimes.go @@ -2,14 +2,30 @@ package system import ( "os" + "syscall" "time" + "unsafe" ) +var ( + maxTime time.Time +) + +func init() { + if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 { + // This is a 64 bit timespec + // os.Chtimes limits time to 
the following + maxTime = time.Unix(0, 1<<63-1) + } else { + // This is a 32 bit timespec + maxTime = time.Unix(1<<31-1, 0) + } +} + // Chtimes changes the access time and modified time of a file at the given path func Chtimes(name string, atime time.Time, mtime time.Time) error { unixMinTime := time.Unix(0, 0) - // The max Unix time is 33 bits set - unixMaxTime := unixMinTime.Add((1<<33 - 1) * time.Second) + unixMaxTime := maxTime // If the modified time is prior to the Unix Epoch, or after the // end of Unix Time, os.Chtimes has undefined behavior diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go index 9d83304ff..c14dbf376 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/meminfo_linux.go @@ -7,7 +7,7 @@ import ( "strconv" "strings" - "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units" + "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units" ) // ReadMemInfo retrieves memory statistics of the host system and returns a diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go new file mode 100644 index 000000000..1b6cc9cbd --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_unix.go @@ -0,0 +1,8 @@ +// +build !windows + +package system + +// DefaultPathEnv is unix style list of directories to search for +// executables. 
Each directory is separated from the next by a colon +// ':' character . +const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go new file mode 100644 index 000000000..09e7f89fe --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/path_windows.go @@ -0,0 +1,7 @@ +// +build windows + +package system + +// DefaultPathEnv is deliberately empty on Windows as the default path will be set by +// the container. Docker has no context of what the default path should be. +const DefaultPathEnv = "" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go new file mode 100644 index 000000000..b01d08acf --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_solaris.go @@ -0,0 +1,17 @@ +// +build solaris + +package system + +import ( + "syscall" +) + +// fromStatT creates a system.StatT type from a syscall.Stat_t type +func fromStatT(s *syscall.Stat_t) (*StatT, error) { + return &StatT{size: s.Size, + mode: uint32(s.Mode), + uid: s.Uid, + gid: s.Gid, + rdev: uint64(s.Rdev), + mtim: s.Mtim}, nil +} diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go index 381ea8211..c6075d4ff 100644 --- 
a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/stat_unsupported.go @@ -1,4 +1,4 @@ -// +build !linux,!windows,!freebsd +// +build !linux,!windows,!freebsd,!solaris package system diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go index 50054765a..f1497c587 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_unix.go @@ -6,6 +6,6 @@ import "syscall" // Unmount is a platform-specific helper function to call // the unmount syscall. 
-func Unmount(dest string) { - syscall.Unmount(dest, 0) +func Unmount(dest string) error { + return syscall.Unmount(dest, 0) } diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go index 3a3a55b26..273aa234b 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/syscall_windows.go @@ -1,6 +1,36 @@ package system +import ( + "fmt" + "syscall" +) + +// OSVersion is a wrapper for Windows version information +// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx +type OSVersion struct { + Version uint32 + MajorVersion uint8 + MinorVersion uint8 + Build uint16 +} + +// GetOSVersion gets the operating system version on Windows. Note that +// docker.exe must be manifested to get the correct version information. +func GetOSVersion() (OSVersion, error) { + var err error + osv := OSVersion{} + osv.Version, err = syscall.GetVersion() + if err != nil { + return osv, fmt.Errorf("Failed to call GetVersion()") + } + osv.MajorVersion = uint8(osv.Version & 0xFF) + osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) + osv.Build = uint16(osv.Version >> 16) + return osv, nil +} + // Unmount is a platform-specific helper function to call // the unmount syscall. 
Not supported on Windows -func Unmount(dest string) { +func Unmount(dest string) error { + return nil } diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go index 007bfa8c0..fc8a1aba9 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system/utimes_linux.go @@ -5,7 +5,7 @@ import ( "unsafe" ) -// LUtimesNano is used to change access and modification time of the speficied path. +// LUtimesNano is used to change access and modification time of the specified path. // It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. func LUtimesNano(path string, ts []syscall.Timespec) error { // These are not currently available in syscall diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md new file mode 100644 index 000000000..9ea86d784 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/CONTRIBUTING.md @@ -0,0 +1,67 @@ +# Contributing to go-units + +Want to hack on go-units? Awesome! Here are instructions to get you started. + +go-units is a part of the [Docker](https://www.docker.com) project, and follows +the same rules and principles. If you're already familiar with the way +Docker does things, you'll feel right at home. 
+ +Otherwise, go read Docker's +[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), +[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), +[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and +[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). + +### Sign your work + +The sign-off is a simple line at the end of the explanation for the patch. Your +signature certifies that you wrote the patch or otherwise have the right to pass +it on as an open-source patch. The rules are pretty simple: if you can certify +the below (from [developercertificate.org](http://developercertificate.org/)): + +``` +Developer Certificate of Origin +Version 1.1 + +Copyright (C) 2004, 2006 The Linux Foundation and its contributors. +660 York Street, Suite 102, +San Francisco, CA 94110 USA + +Everyone is permitted to copy and distribute verbatim copies of this +license document, but changing it is not allowed. + +Developer's Certificate of Origin 1.1 + +By making a contribution to this project, I certify that: + +(a) The contribution was created in whole or in part by me and I + have the right to submit it under the open source license + indicated in the file; or + +(b) The contribution is based upon previous work that, to the best + of my knowledge, is covered under an appropriate open source + license and I have the right under that license to submit that + work with modifications, whether created in whole or in part + by me, under the same open source license (unless I am + permitted to submit under a different license), as indicated + in the file; or + +(c) The contribution was provided directly to me by some other + person who certified (a), (b) or (c) and I have not modified + it. 
+ +(d) I understand and agree that this project and the contribution + are public and that a record of the contribution (including all + personal information I submit with it, including my sign-off) is + maintained indefinitely and may be redistributed consistent with + this project or the open source license(s) involved. +``` + +Then you just add a line to every git commit message: + + Signed-off-by: Joe Smith + +Use your real name (sorry, no pseudonyms or anonymous contributions.) + +If you set your `user.name` and `user.email` git configs, you can sign your +commit automatically with `git commit -s`. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code new file mode 100644 index 000000000..b55b37bc3 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.code @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2015 Docker, Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs new file mode 100644 index 000000000..e26cd4fc8 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/LICENSE.docs @@ -0,0 +1,425 @@ +Attribution-ShareAlike 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. + +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. 
+ Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More_considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution-ShareAlike 4.0 International Public +License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution-ShareAlike 4.0 International Public License ("Public +License"). 
To the extent this Public License may be interpreted as a +contract, You are granted the Licensed Rights in consideration of Your +acceptance of these terms and conditions, and the Licensor grants You +such rights in consideration of benefits the Licensor receives from +making the Licensed Material available under these terms and +conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. BY-SA Compatible License means a license listed at + creativecommons.org/compatiblelicenses, approved by Creative + Commons as essentially the equivalent of this Public License. + + d. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + e. 
Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + f. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + g. License Elements means the license attributes listed in the name + of a Creative Commons Public License. The License Elements of this + Public License are Attribution and ShareAlike. + + h. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + i. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + j. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + k. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + l. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + m. 
You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. Additional offer from the Licensor -- Adapted Material. 
+ Every recipient of Adapted Material from You + automatically receives an offer from the Licensor to + exercise the Licensed Rights in the Adapted Material + under the conditions of the Adapter's License You apply. + + c. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. 
retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + b. ShareAlike. + + In addition to the conditions in Section 3(a), if You Share + Adapted Material You produce, the following conditions also apply. + + 1. The Adapter's License You apply must be a Creative Commons + license with the same License Elements, this version or + later, or a BY-SA Compatible License. + + 2. You must include the text of, or the URI or hyperlink to, the + Adapter's License You apply. You may satisfy this condition + in any reasonable manner based on the medium, means, and + context in which You Share Adapted Material. + + 3. 
You may not offer or impose any additional or different terms + or conditions on, or apply any Effective Technological + Measures to, Adapted Material that restrict exercise of the + rights granted under the Adapter's License You apply. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material, + + including for purposes of Section 3(b); and + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. 
TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. 
The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public licenses. +Notwithstanding, Creative Commons may elect to apply one of its public +licenses to material it publishes and in those instances will be +considered the "Licensor." 
Except for the limited purpose of indicating +that material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the public +licenses. + +Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS new file mode 100644 index 000000000..477be8b21 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/MAINTAINERS @@ -0,0 +1,27 @@ +# go-connections maintainers file +# +# This file describes who runs the docker/go-connections project and how. +# This is a living document - if you see something out of date or missing, speak up! +# +# It is structured to be consumable by both humans and programs. +# To extract its contents programmatically, use any TOML-compliant parser. +# +# This file is compiled into the MAINTAINERS file in docker/opensource. +# +[Org] + [Org."Core maintainers"] + people = [ + "calavera", + ] + +[people] + +# A reference list of all people associated with the project. +# All other sections should refer to people by their canonical key +# in the people section. 
+ + # ADD YOURSELF HERE IN ALPHABETICAL ORDER + [people.calavera] + Name = "David Calavera" + Email = "david.calavera@gmail.com" + GitHub = "calavera" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md new file mode 100644 index 000000000..3ce4d79da --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/README.md @@ -0,0 +1,18 @@ +[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) + +# Introduction + +go-units is a library to transform human friendly measurements into machine friendly values. + +## Usage + +See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. + +## Copyright and license + +Copyright © 2015 Docker, Inc. All rights reserved, except as follows. Code +is released under the Apache 2.0 license. The README.md file, and files in the +"docs" folder are licensed under the Creative Commons Attribution 4.0 +International License under the terms and conditions set forth in the file +"LICENSE.docs". You may obtain a duplicate copy of the same license, titled +CC-BY-SA-4.0, at http://creativecommons.org/licenses/by/4.0/. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml new file mode 100644 index 000000000..9043b3547 --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/circle.yml @@ -0,0 +1,11 @@ +dependencies: + post: + # install golint + - go get github.com/golang/lint/golint + +test: + pre: + # run analysis before tests + - go vet ./... + - test -z "$(golint ./... 
| tee /dev/stderr)" + - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go similarity index 100% rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/duration.go rename to vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/duration.go diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go similarity index 97% rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go rename to vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go index 2fde3b412..3b59daff3 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units/size.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/size.go @@ -49,7 +49,7 @@ func CustomSize(format string, size float64, base float64, _map []string) string } // HumanSize returns a human-readable approximation of a size -// using SI standard (eg. "44kB", "17MB"). +// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). 
func HumanSize(size float64) string { return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) } diff --git a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go similarity index 77% rename from vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go rename to vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go index 8fb0d804d..5ac7fd825 100644 --- a/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit/ulimit.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/github.com/docker/go-units/ulimit.go @@ -1,6 +1,4 @@ -// Package ulimit provides structure and helper function to parse and represent -// resource limits (Rlimit and Ulimit, its human friendly version). -package ulimit +package units import ( "fmt" @@ -64,8 +62,8 @@ var ulimitNameMapping = map[string]int{ "stack": rlimitStack, } -// Parse parses and returns a Ulimit from the specified string. -func Parse(val string) (*Ulimit, error) { +// ParseUlimit parses and returns a Ulimit from the specified string. 
+func ParseUlimit(val string) (*Ulimit, error) { parts := strings.SplitN(val, "=", 2) if len(parts) != 2 { return nil, fmt.Errorf("invalid ulimit argument: %s", val) @@ -75,25 +73,34 @@ func Parse(val string) (*Ulimit, error) { return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) } - limitVals := strings.SplitN(parts[1], ":", 2) - if len(limitVals) > 2 { + var ( + soft int64 + hard = &soft // default to soft in case no hard was set + temp int64 + err error + ) + switch limitVals := strings.Split(parts[1], ":"); len(limitVals) { + case 2: + temp, err = strconv.ParseInt(limitVals[1], 10, 64) + if err != nil { + return nil, err + } + hard = &temp + fallthrough + case 1: + soft, err = strconv.ParseInt(limitVals[0], 10, 64) + if err != nil { + return nil, err + } + default: return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) } - soft, err := strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err + if soft > *hard { + return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, *hard) } - hard := soft // in case no hard was set - if len(limitVals) == 2 { - hard, err = strconv.ParseInt(limitVals[1], 10, 64) - } - if soft > hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil + return &Ulimit{Name: parts[0], Soft: soft, Hard: *hard}, nil } // GetRlimit returns the RLimit corresponding to Ulimit. diff --git a/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go new file mode 100644 index 000000000..0da42b1df --- /dev/null +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/net/context/context.go @@ -0,0 +1,447 @@ +// Copyright 2014 The Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. The chain of function calls between must +// propagate the Context, optionally replacing it with a modified copy created +// using WithDeadline, WithTimeout, WithCancel, or WithValue. +// +// Programs that use Contexts should follow these rules to keep interfaces +// consistent across packages and enable static analysis tools to check context +// propagation: +// +// Do not store Contexts inside a struct type; instead, pass a Context +// explicitly to each function that needs it. The Context should be the first +// parameter, typically named ctx: +// +// func DoSomething(ctx context.Context, arg Arg) error { +// // ... use ctx ... +// } +// +// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// if you are unsure about which Context to use. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +// +// The same Context may be passed to functions running in different goroutines; +// Contexts are safe for simultaneous use by multiple goroutines. +// +// See http://blog.golang.org/context for example code for a server that uses +// Contexts. +package context // import "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context" + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. 
+type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out <-chan Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. 
+ // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. + // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Background returns a non-nil, empty Context. It is never canceled, has no +// values, and has no deadline. It is typically used by the main function, +// initialization, and tests, and as the top-level Context for incoming +// requests. +func Background() Context { + return background +} + +// TODO returns a non-nil, empty Context. Code should use context.TODO when +// it's unclear which Context to use or it is not yet available (because the +// surrounding function has not yet been extended to accept a Context +// parameter). TODO is recognized by static analysis tools that determine +// whether Contexts are propagated correctly in a program. +func TODO() Context { + return todo +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. 
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, &c) + return &c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) cancelCtx { + return cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return &c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. 
+type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. + + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. 
+ return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. 
+type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/sys/unix/asm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm.s diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_386.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_darwin_386.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_386.s diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_amd64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_darwin_amd64.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_amd64.s diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_darwin_arm.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm.s diff --git a/vendor/golang.org/x/sys/unix/asm_darwin_arm64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_darwin_arm64.s rename to 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_darwin_arm64.s diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_386.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_dragonfly_386.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_386.s diff --git a/vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_amd64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_dragonfly_amd64.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_dragonfly_amd64.s diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_386.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_freebsd_386.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_386.s diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_amd64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_freebsd_amd64.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_amd64.s diff --git a/vendor/golang.org/x/sys/unix/asm_freebsd_arm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_arm.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_freebsd_arm.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_freebsd_arm.s diff --git a/vendor/golang.org/x/sys/unix/asm_linux_386.s 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_386.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_linux_386.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_386.s diff --git a/vendor/golang.org/x/sys/unix/asm_linux_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_amd64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_linux_amd64.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_amd64.s diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_linux_arm.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm.s diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_linux_arm64.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_arm64.s diff --git a/vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_ppc64x.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_linux_ppc64x.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_linux_ppc64x.s diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_386.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_netbsd_386.s rename to 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_386.s diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_amd64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_netbsd_amd64.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_amd64.s diff --git a/vendor/golang.org/x/sys/unix/asm_netbsd_arm.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_arm.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_netbsd_arm.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_netbsd_arm.s diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_386.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_386.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_openbsd_386.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_386.s diff --git a/vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_amd64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_openbsd_amd64.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_openbsd_amd64.s diff --git a/vendor/golang.org/x/sys/unix/asm_solaris_amd64.s b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_solaris_amd64.s similarity index 100% rename from vendor/golang.org/x/sys/unix/asm_solaris_amd64.s rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/asm_solaris_amd64.s diff --git a/vendor/golang.org/x/sys/unix/constants.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/constants.go similarity index 100% rename from vendor/golang.org/x/sys/unix/constants.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/constants.go diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unix.go similarity index 100% rename from vendor/golang.org/x/sys/unix/env_unix.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unix.go diff --git a/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unset.go similarity index 100% rename from vendor/golang.org/x/sys/unix/env_unset.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/env_unset.go diff --git a/vendor/golang.org/x/sys/unix/flock.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock.go similarity index 100% rename from vendor/golang.org/x/sys/unix/flock.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock.go diff --git a/vendor/golang.org/x/sys/unix/flock_linux_32bit.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock_linux_32bit.go similarity index 100% rename from vendor/golang.org/x/sys/unix/flock_linux_32bit.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/flock_linux_32bit.go diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo.go similarity index 100% rename from vendor/golang.org/x/sys/unix/gccgo.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo.go diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_c.c similarity index 100% rename from vendor/golang.org/x/sys/unix/gccgo_c.c rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_c.c diff --git a/vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_linux_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/gccgo_linux_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/gccgo_linux_amd64.go diff --git a/vendor/golang.org/x/sys/unix/mkall.sh b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh similarity index 100% rename from vendor/golang.org/x/sys/unix/mkall.sh rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkall.sh diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh similarity index 100% rename from vendor/golang.org/x/sys/unix/mkerrors.sh rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mkerrors.sh diff --git a/vendor/golang.org/x/sys/unix/mksyscall.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl similarity index 100% rename from vendor/golang.org/x/sys/unix/mksyscall.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall.pl diff --git a/vendor/golang.org/x/sys/unix/mksyscall_solaris.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall_solaris.pl similarity index 100% rename from vendor/golang.org/x/sys/unix/mksyscall_solaris.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksyscall_solaris.pl diff --git 
a/vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysctl_openbsd.pl similarity index 100% rename from vendor/golang.org/x/sys/unix/mksysctl_openbsd.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysctl_openbsd.pl diff --git a/vendor/golang.org/x/sys/unix/mksysnum_darwin.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_darwin.pl similarity index 100% rename from vendor/golang.org/x/sys/unix/mksysnum_darwin.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_darwin.pl diff --git a/vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_dragonfly.pl similarity index 100% rename from vendor/golang.org/x/sys/unix/mksysnum_dragonfly.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_dragonfly.pl diff --git a/vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_freebsd.pl similarity index 100% rename from vendor/golang.org/x/sys/unix/mksysnum_freebsd.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_freebsd.pl diff --git a/vendor/golang.org/x/sys/unix/mksysnum_linux.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_linux.pl similarity index 100% rename from vendor/golang.org/x/sys/unix/mksysnum_linux.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_linux.pl diff --git a/vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_netbsd.pl similarity index 100% rename from 
vendor/golang.org/x/sys/unix/mksysnum_netbsd.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_netbsd.pl diff --git a/vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_openbsd.pl similarity index 100% rename from vendor/golang.org/x/sys/unix/mksysnum_openbsd.pl rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/mksysnum_openbsd.pl diff --git a/vendor/golang.org/x/sys/unix/race.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/race.go similarity index 100% rename from vendor/golang.org/x/sys/unix/race.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/race.go diff --git a/vendor/golang.org/x/sys/unix/race0.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/race0.go similarity index 100% rename from vendor/golang.org/x/sys/unix/race0.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/race0.go diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/sockcmsg_linux.go similarity index 100% rename from vendor/golang.org/x/sys/unix/sockcmsg_linux.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/sockcmsg_linux.go diff --git a/vendor/golang.org/x/sys/unix/sockcmsg_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/sockcmsg_unix.go similarity index 100% rename from vendor/golang.org/x/sys/unix/sockcmsg_unix.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/sockcmsg_unix.go diff --git a/vendor/golang.org/x/sys/unix/str.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/str.go similarity index 100% rename 
from vendor/golang.org/x/sys/unix/str.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/str.go diff --git a/vendor/golang.org/x/sys/unix/syscall.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go similarity index 96% rename from vendor/golang.org/x/sys/unix/syscall.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go index 6442a9939..012f2d64f 100644 --- a/vendor/golang.org/x/sys/unix/syscall.go +++ b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall.go @@ -19,7 +19,7 @@ // These calls return err == nil to indicate success; otherwise // err represents an operating system error describing the failure and // holds a value of type syscall.Errno. -package unix // import "golang.org/x/sys/unix" +package unix // import "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix" import "unsafe" diff --git a/vendor/golang.org/x/sys/unix/syscall_bsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_bsd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_bsd.go diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_darwin.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin.go diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_darwin_386.go rename to 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_386.go diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_darwin_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_amd64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_darwin_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm.go diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_darwin_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_darwin_arm64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_dragonfly.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly.go diff --git a/vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_dragonfly_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_386.go diff --git 
a/vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_dragonfly_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_dragonfly_amd64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_freebsd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd.go diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_freebsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_386.go diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_freebsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_freebsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_freebsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux.go 
similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_linux.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux.go diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_linux_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_386.go diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_linux_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_amd64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_linux_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm.go diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_linux_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_arm64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_ppc64x.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_linux_ppc64x.go rename to 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_linux_ppc64x.go diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_netbsd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd.go diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_netbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_netbsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_netbsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_netbsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/syscall_no_getwd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_no_getwd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_no_getwd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_no_getwd.go diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_openbsd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd.go diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_openbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_openbsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_openbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_solaris.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris.go diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/syscall_solaris_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_solaris_amd64.go diff --git a/vendor/golang.org/x/sys/unix/syscall_unix.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix.go similarity index 100% rename from 
vendor/golang.org/x/sys/unix/syscall_unix.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/syscall_unix.go diff --git a/vendor/golang.org/x/sys/unix/types_darwin.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_darwin.go similarity index 100% rename from vendor/golang.org/x/sys/unix/types_darwin.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_darwin.go diff --git a/vendor/golang.org/x/sys/unix/types_dragonfly.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_dragonfly.go similarity index 100% rename from vendor/golang.org/x/sys/unix/types_dragonfly.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_dragonfly.go diff --git a/vendor/golang.org/x/sys/unix/types_freebsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_freebsd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/types_freebsd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_freebsd.go diff --git a/vendor/golang.org/x/sys/unix/types_linux.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_linux.go similarity index 100% rename from vendor/golang.org/x/sys/unix/types_linux.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_linux.go diff --git a/vendor/golang.org/x/sys/unix/types_netbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_netbsd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/types_netbsd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_netbsd.go diff --git a/vendor/golang.org/x/sys/unix/types_openbsd.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_openbsd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/types_openbsd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_openbsd.go diff --git a/vendor/golang.org/x/sys/unix/types_solaris.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_solaris.go similarity index 100% rename from vendor/golang.org/x/sys/unix/types_solaris.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/types_solaris.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_darwin_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_386.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_darwin_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_darwin_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_darwin_arm64.go rename 
to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_darwin_arm64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_dragonfly_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_386.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_dragonfly_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_freebsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_freebsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_freebsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_freebsd_arm.go diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_linux_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_386.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_linux_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_arm64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64le.go 
similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_linux_ppc64le.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_netbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_netbsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_netbsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_netbsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_openbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_openbsd_amd64.go rename to 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_openbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_solaris_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zerrors_solaris_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zerrors_solaris_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_darwin_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_386.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_darwin_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_darwin_arm64.go diff --git 
a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_dragonfly_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_386.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_dragonfly_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_freebsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_freebsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_freebsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_linux_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_386.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_arm64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go similarity index 100% rename from 
vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_netbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_netbsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_netbsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_openbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go rename to 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_openbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_solaris_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsyscall_solaris_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsysctl_openbsd.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysctl_openbsd.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysctl_openbsd.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysctl_openbsd.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_darwin_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_386.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_darwin_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_darwin_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm.go diff --git 
a/vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_darwin_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_darwin_arm64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_dragonfly_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_386.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_dragonfly_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_freebsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_freebsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_freebsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_linux_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_386.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_arm64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64.go similarity index 100% rename from 
vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_netbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_netbsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_netbsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_openbsd_386.go rename to 
vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_openbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_solaris_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/zsysnum_solaris_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/zsysnum_solaris_amd64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_darwin_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_386.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_darwin_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_amd64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_darwin_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm.go diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_darwin_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_darwin_arm64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_dragonfly_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_386.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_dragonfly_amd64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_freebsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_386.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_freebsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go 
b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_freebsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_freebsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_linux_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_386.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_amd64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_linux_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_arm64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go 
rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64le.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_linux_ppc64le.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_netbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_386.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_netbsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_arm.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_netbsd_arm.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_netbsd_arm.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_386.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_openbsd_386.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_386.go diff --git 
a/vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_openbsd_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_openbsd_amd64.go diff --git a/vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go b/vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_solaris_amd64.go similarity index 100% rename from vendor/golang.org/x/sys/unix/ztypes_solaris_amd64.go rename to vendor/github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix/ztypes_solaris_amd64.go diff --git a/vendor/github.com/fsouza/go-dockerclient/network.go b/vendor/github.com/fsouza/go-dockerclient/network.go index a7fc152de..30d54230a 100644 --- a/vendor/github.com/fsouza/go-dockerclient/network.go +++ b/vendor/github.com/fsouza/go-dockerclient/network.go @@ -17,19 +17,20 @@ var ErrNetworkAlreadyExists = errors.New("network already exists") // Network represents a network. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. type Network struct { Name string ID string `json:"Id"` Scope string Driver string + IPAM IPAMOptions Containers map[string]Endpoint Options map[string]string } // Endpoint contains network resources allocated and used for a container in a network // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. type Endpoint struct { Name string ID string `json:"EndpointID"` @@ -40,7 +41,7 @@ type Endpoint struct { // ListNetworks returns all networks. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. 
func (c *Client) ListNetworks() ([]Network, error) { resp, err := c.do("GET", "/networks", doOptions{}) if err != nil { @@ -56,7 +57,7 @@ func (c *Client) ListNetworks() ([]Network, error) { // NetworkInfo returns information about a network by its ID. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) NetworkInfo(id string) (*Network, error) { path := "/networks/" + id resp, err := c.do("GET", path, doOptions{}) @@ -77,7 +78,7 @@ func (c *Client) NetworkInfo(id string) (*Network, error) { // CreateNetworkOptions specify parameters to the CreateNetwork function and // (for now) is the expected body of the "create network" http request message // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. type CreateNetworkOptions struct { Name string `json:"Name"` CheckDuplicate bool `json:"CheckDuplicate"` @@ -107,7 +108,7 @@ type IPAMConfig struct { // CreateNetwork creates a new network, returning the network instance, // or an error in case of failure. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { resp, err := c.do( "POST", @@ -142,9 +143,9 @@ func (c *Client) CreateNetwork(opts CreateNetworkOptions) (*Network, error) { return &network, nil } -// RemoveNetwork removes a network or an error in case of failure. +// RemoveNetwork removes a network or returns an error in case of failure. // -// See https://goo.gl/1kmPKZ for more details. +// See https://goo.gl/6GugX3 for more details. func (c *Client) RemoveNetwork(id string) error { resp, err := c.do("DELETE", "/networks/"+id, doOptions{}) if err != nil { @@ -157,6 +158,43 @@ func (c *Client) RemoveNetwork(id string) error { return nil } +// NetworkConnectionOptions specify parameters to the ConnectNetwork and DisconnectNetwork function. 
+// +// See https://goo.gl/6GugX3 for more details. +type NetworkConnectionOptions struct { + Container string +} + +// ConnectNetwork adds a container to a network or returns an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) ConnectNetwork(id string, opts NetworkConnectionOptions) error { + resp, err := c.do("POST", "/networks/"+id+"/connect", doOptions{data: opts}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} + } + return err + } + resp.Body.Close() + return nil +} + +// DisconnectNetwork removes a container from a network or returns an error in case of failure. +// +// See https://goo.gl/6GugX3 for more details. +func (c *Client) DisconnectNetwork(id string, opts NetworkConnectionOptions) error { + resp, err := c.do("POST", "/networks/"+id+"/disconnect", doOptions{data: opts}) + if err != nil { + if e, ok := err.(*Error); ok && e.Status == http.StatusNotFound { + return &NoSuchNetworkOrContainer{NetworkID: id, ContainerID: opts.Container} + } + return err + } + resp.Body.Close() + return nil +} + // NoSuchNetwork is the error returned when a given network does not exist. type NoSuchNetwork struct { ID string @@ -165,3 +203,13 @@ type NoSuchNetwork struct { func (err *NoSuchNetwork) Error() string { return fmt.Sprintf("No such network: %s", err.ID) } + +// NoSuchNetwork is the error returned when a given network or container does not exist. 
+type NoSuchNetworkOrContainer struct { + NetworkID string + ContainerID string +} + +func (err *NoSuchNetworkOrContainer) Error() string { + return fmt.Sprintf("No such network (%s) or container (%s)", err.NetworkID, err.ContainerID) +} diff --git a/vendor/github.com/go-ini/ini/ini.go b/vendor/github.com/go-ini/ini/ini.go index 5a13dc489..1a27f0680 100644 --- a/vendor/github.com/go-ini/ini/ini.go +++ b/vendor/github.com/go-ini/ini/ini.go @@ -34,7 +34,7 @@ const ( // Maximum allowed depth when recursively substituing variable names. _DEPTH_VALUES = 99 - _VERSION = "1.8.5" + _VERSION = "1.8.6" ) func Version() string { @@ -453,7 +453,7 @@ func (k *Key) RangeTime(defaultVal, min, max time.Time) time.Time { return k.RangeTimeFormat(time.RFC3339, defaultVal, min, max) } -// Strings returns list of string devide by given delimiter. +// Strings returns list of string divided by given delimiter. func (k *Key) Strings(delim string) []string { str := k.String() if len(str) == 0 { @@ -467,7 +467,7 @@ func (k *Key) Strings(delim string) []string { return vals } -// Float64s returns list of float64 devide by given delimiter. +// Float64s returns list of float64 divided by given delimiter. func (k *Key) Float64s(delim string) []float64 { strs := k.Strings(delim) vals := make([]float64, len(strs)) @@ -477,7 +477,7 @@ func (k *Key) Float64s(delim string) []float64 { return vals } -// Ints returns list of int devide by given delimiter. +// Ints returns list of int divided by given delimiter. func (k *Key) Ints(delim string) []int { strs := k.Strings(delim) vals := make([]int, len(strs)) @@ -487,7 +487,7 @@ func (k *Key) Ints(delim string) []int { return vals } -// Int64s returns list of int64 devide by given delimiter. +// Int64s returns list of int64 divided by given delimiter. 
func (k *Key) Int64s(delim string) []int64 { strs := k.Strings(delim) vals := make([]int64, len(strs)) @@ -497,18 +497,18 @@ func (k *Key) Int64s(delim string) []int64 { return vals } -// Uints returns list of uint devide by given delimiter. +// Uints returns list of uint divided by given delimiter. func (k *Key) Uints(delim string) []uint { strs := k.Strings(delim) vals := make([]uint, len(strs)) for i := range strs { - u, _ := strconv.ParseUint(strs[i], 10, 64) + u, _ := strconv.ParseUint(strs[i], 10, 0) vals[i] = uint(u) } return vals } -// Uint64s returns list of uint64 devide by given delimiter. +// Uint64s returns list of uint64 divided by given delimiter. func (k *Key) Uint64s(delim string) []uint64 { strs := k.Strings(delim) vals := make([]uint64, len(strs)) @@ -518,7 +518,7 @@ func (k *Key) Uint64s(delim string) []uint64 { return vals } -// TimesFormat parses with given format and returns list of time.Time devide by given delimiter. +// TimesFormat parses with given format and returns list of time.Time divided by given delimiter. func (k *Key) TimesFormat(format, delim string) []time.Time { strs := k.Strings(delim) vals := make([]time.Time, len(strs)) @@ -528,7 +528,7 @@ func (k *Key) TimesFormat(format, delim string) []time.Time { return vals } -// Times parses with RFC3339 format and returns list of time.Time devide by given delimiter. +// Times parses with RFC3339 format and returns list of time.Time divided by given delimiter. func (k *Key) Times(delim string) []time.Time { return k.TimesFormat(time.RFC3339, delim) } diff --git a/vendor/github.com/golang/protobuf/proto/Makefile b/vendor/github.com/golang/protobuf/proto/Makefile new file mode 100644 index 000000000..f1f06564a --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/Makefile @@ -0,0 +1,43 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. 
+# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +install: + go install + +test: install generate-test-pbs + go test + + +generate-test-pbs: + make install + make -C testdata + protoc --go_out=Mtestdata/test.proto=github.com/golang/protobuf/proto/testdata:. 
proto3_proto/proto3.proto + make diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 000000000..e98ddec98 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,223 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. +func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extendableProto); ok { + emOut := out.Addr().Interface().(extendableProto) + mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap()) + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. 
+ if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 000000000..5810782fd --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,867 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. 
It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + // x, n already 0 + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + // x, err already 0 + + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. 
+func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. 
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. 
+ if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.ExtensionMap()[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + e.ExtensionMap()[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. 
+ err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. + return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. 
+const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). 
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). 
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. 
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() || !valelem.IsValid() { + // We did not decode the key or the value in the map entry. + // Either way, it's an invalid map entry. + return fmt.Errorf("proto: bad map data: missing key/val") + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. 
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. 
+ if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 000000000..231b07401 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1325 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. 
+// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. 
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + var state errorState + if err != nil && !state.shouldContinue(err, nil) { + return nil, err + } + if p.buf == nil && err == nil { + // Return a non-nil slice on success. 
+ return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + if err != nil { + return err + } + p.buf = append(p.buf, data...) + return nil + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Encode++ + } + + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + stats.Size++ + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? 
+ if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. 
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). 
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). 
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. +func (o *Buffer) enc_map(p *Properties, base structPointer) error { + v := *structPointer_ExtMap(base, p.field) + if err := encodeExtensionMap(v); err != nil { + return err + } + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := *structPointer_ExtMap(base, p.field) + return sizeExtensionMap(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? 
+ + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + // The only illegal map entry values are nil message pointers. + if val.Kind() == reflect.Ptr && val.IsNil() { + return errors.New("proto: map has nil element") + } + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. 
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. + // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. 
+ return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) 
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 000000000..f5db1def3 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,276 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). 
+ - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal (a "bytes" field, + although represented by []byte, is not a repeated field) + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + 
+ u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// em1 and em2 are extension maps. +func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. 
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 000000000..054f4f1df --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,399 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. 
+type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base extendableProto, id int32, b []byte) { + base.ExtensionMap()[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + // Check the extended type. + if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. 
+ if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m. +func encodeExtensionMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func sizeExtensionMap(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. 
+ n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb extendableProto, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + _, ok := pb.ExtensionMap()[extension.Field] + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb extendableProto, extension *ExtensionDesc) { + // TODO: Check types, field numbers, etc.? + delete(pb.ExtensionMap(), extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) { + if err := checkExtensionTypes(pb, extension); err != nil { + return nil, err + } + + emap := pb.ExtensionMap() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. 
+ return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. 
+ // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := pb.(extendableProto) + if !ok { + err = errors.New("proto: not an extendable proto") + return + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error { + if err := checkExtensionTypes(pb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. 
+ if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 000000000..42a58c6fd --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,893 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. 
+ - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + 
return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. 
+ switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // write point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. 
+func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. 
+func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. 
+// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + 
*(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. 
+// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != 
nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 000000000..e25e01e63 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,280 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. +var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. 
We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. 
+ +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(m map[int32]Extension) ([]byte, error) { + if err := encodeExtensionMap(m); err != nil { + return nil, err + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error { + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) 
// join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) { + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. 
+ +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 000000000..749919d25 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,479 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. 
+func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. 
+func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. 
+type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
+func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
+type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 000000000..e9be0fe92 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,266 @@ +// Go support for Protocol Buffers - 
Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. 
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. 
+func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. 
+type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 000000000..d4531c056 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,842 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. 
+type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. 
+type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. +func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + if p.OrigName != p.Name { + s += ",name=" + p.OrigName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. 
+func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. 
+ p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. +func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = 
(*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = 
size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_byte + p.dec = (*Buffer).dec_slice_byte + p.size = size_slice_byte + // This is a []byte, which is either a bytes field, + // or the value of a map field. In the latter case, + // we always encode an empty []byte, so we should not + // use the proto3 enc/size funcs. + // f == nil iff this is the key/value of a map field. + if p.proto3 && f != nil { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + 
p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. 
+ if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. 
+func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") != "" // special case + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && !oneof { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. 
+ prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. 
+ +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } + +// MessageType returns the message type (pointer to struct) for a named message. 
+func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 000000000..2336b144c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,751 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Printf("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. 
+type raw interface { + Bytes() []byte +} + +func writeStruct(w *textWriter, sv reflect.Value) error { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. 
+ if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if pv.Type().Implements(extendableProtoType) { + if err := writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. 
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Interface().([]byte))); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if tm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. 
+func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = 
w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep := pv.Interface().(extendableProto) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m := ep.ExtensionMap() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +func marshalText(w io.Writer, pb Message, compact bool) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: compact, + } + + if tm, ok := pb.(encoding.TextMarshaler); ok { + text, err := tm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { + return marshalText(w, pb, false) +} + +// MarshalTextString is the same as MarshalText, but returns the string directly. 
+func MarshalTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, false) + return buf.String() +} + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { + var buf bytes.Buffer + marshalText(&buf, pb, true) + return buf.String() +} diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 000000000..6d0cf2589 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,798 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. + +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + 
p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + 
errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < 
n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && p.cur.value[0] == '"' { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || p.s[0] != '"' { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. 
+func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. 
+ break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]". + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + tok = p.next() + if tok.err != nil { + return tok.err + } + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == tok.value { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", tok.value) + } + // Check the extension terminator. + tok = p.next() + if tok.err != nil { + return tok.err + } + if tok.value != "]" { + return p.errorf("unrecognized extension terminator %q", tok.value) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. 
+ var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(extendableProto) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + sv.Field(oop.Field).Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. + if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // Technically the "key" and "value" could come in any order, + // but in practice they won't. 
+ + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + if err := p.consumeToken("key"); err != nil { + return err + } + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken("value"); err != nil { + return err + } + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + if err := p.consumeToken(terminator); err != nil { + return err + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } else if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // Either "true", "false", 1 or 0. + switch tok.value { + case "true", "1": + fv.SetBool(true) + return nil + case "false", "0": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/gorilla/mux/README.md b/vendor/github.com/gorilla/mux/README.md index 55dd4e59a..b987c9e5d 100644 --- a/vendor/github.com/gorilla/mux/README.md +++ b/vendor/github.com/gorilla/mux/README.md @@ -1,211 +1,216 @@ mux === [![GoDoc](https://godoc.org/github.com/gorilla/mux?status.svg)](https://godoc.org/github.com/gorilla/mux) -[![Build Status](https://travis-ci.org/gorilla/mux.png?branch=master)](https://travis-ci.org/gorilla/mux) - -Package gorilla/mux implements a request router and dispatcher. - -The name mux stands for "HTTP request multiplexer". Like the standard -http.ServeMux, mux.Router matches incoming requests against a list of -registered routes and calls a handler for the route that matches the URL -or other conditions. The main features are: - - * Requests can be matched based on URL host, path, path prefix, schemes, - header and query values, HTTP methods or using custom matchers. - * URL hosts and paths can have variables with an optional regular - expression. - * Registered URLs can be built, or "reversed", which helps maintaining - references to resources. - * Routes can be used as subrouters: nested routes are only tested if the - parent route matches. This is useful to define groups of routes that - share common conditions like a host, a path prefix or other repeated - attributes. As a bonus, this optimizes request matching. - * It implements the http.Handler interface so it is compatible with the - standard http.ServeMux. 
+[![Build Status](https://travis-ci.org/gorilla/mux.svg?branch=master)](https://travis-ci.org/gorilla/mux) -Let's start registering a couple of URL paths and handlers: +Package `gorilla/mux` implements a request router and dispatcher. - func main() { - r := mux.NewRouter() - r.HandleFunc("/", HomeHandler) - r.HandleFunc("/products", ProductsHandler) - r.HandleFunc("/articles", ArticlesHandler) - http.Handle("/", r) - } +The name mux stands for "HTTP request multiplexer". Like the standard `http.ServeMux`, `mux.Router` matches incoming requests against a list of registered routes and calls a handler for the route that matches the URL or other conditions. The main features are: -Here we register three routes mapping URL paths to handlers. This is -equivalent to how http.HandleFunc() works: if an incoming request URL matches -one of the paths, the corresponding handler is called passing -(http.ResponseWriter, *http.Request) as parameters. +* Requests can be matched based on URL host, path, path prefix, schemes, header and query values, HTTP methods or using custom matchers. +* URL hosts and paths can have variables with an optional regular expression. +* Registered URLs can be built, or "reversed", which helps maintaining references to resources. +* Routes can be used as subrouters: nested routes are only tested if the parent route matches. This is useful to define groups of routes that share common conditions like a host, a path prefix or other repeated attributes. As a bonus, this optimizes request matching. +* It implements the `http.Handler` interface so it is compatible with the standard `http.ServeMux`. -Paths can have variables. They are defined using the format {name} or -{name:pattern}. If a regular expression pattern is not defined, the matched -variable will be anything until the next slash. 
For example: +Let's start registering a couple of URL paths and handlers: +```go +func main() { r := mux.NewRouter() - r.HandleFunc("/products/{key}", ProductHandler) - r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) + r.HandleFunc("/", HomeHandler) + r.HandleFunc("/products", ProductsHandler) + r.HandleFunc("/articles", ArticlesHandler) + http.Handle("/", r) +} +``` -The names are used to create a map of route variables which can be retrieved -calling mux.Vars(): +Here we register three routes mapping URL paths to handlers. This is equivalent to how `http.HandleFunc()` works: if an incoming request URL matches one of the paths, the corresponding handler is called passing (`http.ResponseWriter`, `*http.Request`) as parameters. - vars := mux.Vars(request) - category := vars["category"] +Paths can have variables. They are defined using the format `{name}` or `{name:pattern}`. If a regular expression pattern is not defined, the matched variable will be anything until the next slash. For example: -And this is all you need to know about the basic usage. More advanced options -are explained below. +```go +r := mux.NewRouter() +r.HandleFunc("/products/{key}", ProductHandler) +r.HandleFunc("/articles/{category}/", ArticlesCategoryHandler) +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler) +``` -Routes can also be restricted to a domain or subdomain. Just define a host -pattern to be matched. They can also have variables: +The names are used to create a map of route variables which can be retrieved calling `mux.Vars()`: - r := mux.NewRouter() - // Only matches if domain is "www.example.com". - r.Host("www.example.com") - // Matches a dynamic subdomain. - r.Host("{subdomain:[a-z]+}.domain.com") +```go +vars := mux.Vars(request) +category := vars["category"] +``` + +And this is all you need to know about the basic usage. More advanced options are explained below. 
+ +Routes can also be restricted to a domain or subdomain. Just define a host pattern to be matched. They can also have variables: + +```go +r := mux.NewRouter() +// Only matches if domain is "www.example.com". +r.Host("www.example.com") +// Matches a dynamic subdomain. +r.Host("{subdomain:[a-z]+}.domain.com") +``` There are several other matchers that can be added. To match path prefixes: - r.PathPrefix("/products/") +```go +r.PathPrefix("/products/") +``` ...or HTTP methods: - r.Methods("GET", "POST") +```go +r.Methods("GET", "POST") +``` ...or URL schemes: - r.Schemes("https") +```go +r.Schemes("https") +``` ...or header values: - r.Headers("X-Requested-With", "XMLHttpRequest") +```go +r.Headers("X-Requested-With", "XMLHttpRequest") +``` ...or query values: - r.Queries("key", "value") +```go +r.Queries("key", "value") +``` ...or to use a custom matcher function: - r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { - return r.ProtoMajor == 0 - }) +```go +r.MatcherFunc(func(r *http.Request, rm *RouteMatch) bool { + return r.ProtoMajor == 0 +}) +``` ...and finally, it is possible to combine several matchers in a single route: - r.HandleFunc("/products", ProductsHandler). - Host("www.example.com"). - Methods("GET"). - Schemes("http") +```go +r.HandleFunc("/products", ProductsHandler). + Host("www.example.com"). + Methods("GET"). + Schemes("http") +``` -Setting the same matching conditions again and again can be boring, so we have -a way to group several routes that share the same requirements. -We call it "subrouting". +Setting the same matching conditions again and again can be boring, so we have a way to group several routes that share the same requirements. We call it "subrouting". -For example, let's say we have several URLs that should only match when the -host is `www.example.com`. Create a route for that host and get a "subrouter" -from it: +For example, let's say we have several URLs that should only match when the host is `www.example.com`. 
Create a route for that host and get a "subrouter" from it: - r := mux.NewRouter() - s := r.Host("www.example.com").Subrouter() +```go +r := mux.NewRouter() +s := r.Host("www.example.com").Subrouter() +``` Then register routes in the subrouter: - s.HandleFunc("/products/", ProductsHandler) - s.HandleFunc("/products/{key}", ProductHandler) - s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +```go +s.HandleFunc("/products/", ProductsHandler) +s.HandleFunc("/products/{key}", ProductHandler) +s.HandleFunc("/articles/{category}/{id:[0-9]+}"), ArticleHandler) +``` -The three URL paths we registered above will only be tested if the domain is -`www.example.com`, because the subrouter is tested first. This is not -only convenient, but also optimizes request matching. You can create -subrouters combining any attribute matchers accepted by a route. +The three URL paths we registered above will only be tested if the domain is `www.example.com`, because the subrouter is tested first. This is not only convenient, but also optimizes request matching. You can create subrouters combining any attribute matchers accepted by a route. -Subrouters can be used to create domain or path "namespaces": you define -subrouters in a central place and then parts of the app can register its -paths relatively to a given subrouter. +Subrouters can be used to create domain or path "namespaces": you define subrouters in a central place and then parts of the app can register its paths relatively to a given subrouter. -There's one more thing about subroutes. When a subrouter has a path prefix, -the inner routes use it as base for their paths: +There's one more thing about subroutes. 
When a subrouter has a path prefix, the inner routes use it as base for their paths: - r := mux.NewRouter() - s := r.PathPrefix("/products").Subrouter() - // "/products/" - s.HandleFunc("/", ProductsHandler) - // "/products/{key}/" - s.HandleFunc("/{key}/", ProductHandler) - // "/products/{key}/details" - s.HandleFunc("/{key}/details", ProductDetailsHandler) +```go +r := mux.NewRouter() +s := r.PathPrefix("/products").Subrouter() +// "/products/" +s.HandleFunc("/", ProductsHandler) +// "/products/{key}/" +s.HandleFunc("/{key}/", ProductHandler) +// "/products/{key}/details" +s.HandleFunc("/{key}/details", ProductDetailsHandler) +``` Now let's see how to build registered URLs. -Routes can be named. All routes that define a name can have their URLs built, -or "reversed". We define a name calling Name() on a route. For example: +Routes can be named. All routes that define a name can have their URLs built, or "reversed". We define a name calling `Name()` on a route. For example: - r := mux.NewRouter() - r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). - Name("article") +```go +r := mux.NewRouter() +r.HandleFunc("/articles/{category}/{id:[0-9]+}", ArticleHandler). + Name("article") +``` -To build a URL, get the route and call the URL() method, passing a sequence of -key/value pairs for the route variables. For the previous route, we would do: +To build a URL, get the route and call the `URL()` method, passing a sequence of key/value pairs for the route variables. For the previous route, we would do: - url, err := r.Get("article").URL("category", "technology", "id", "42") +```go +url, err := r.Get("article").URL("category", "technology", "id", "42") +``` -...and the result will be a url.URL with the following path: +...and the result will be a `url.URL` with the following path: - "/articles/technology/42" +``` +"/articles/technology/42" +``` This also works for host variables: - r := mux.NewRouter() - r.Host("{subdomain}.domain.com"). 
- Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") - - // url.String() will be "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") +```go +r := mux.NewRouter() +r.Host("{subdomain}.domain.com"). + Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// url.String() will be "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` -All variables defined in the route are required, and their values must -conform to the corresponding patterns. These requirements guarantee that a -generated URL will always match a registered route -- the only exception is -for explicitly defined "build-only" routes which never match. +All variables defined in the route are required, and their values must conform to the corresponding patterns. These requirements guarantee that a generated URL will always match a registered route -- the only exception is for explicitly defined "build-only" routes which never match. Regex support also exists for matching Headers within a route. For example, we could do: - r.HeadersRegexp("Content-Type", "application/(text|json)") - -...and the route will match both requests with a Content-Type of `application/json` as well as -`application/text` +```go +r.HeadersRegexp("Content-Type", "application/(text|json)") +``` -There's also a way to build only the URL host or path for a route: -use the methods URLHost() or URLPath() instead. For the previous route, -we would do: +...and the route will match both requests with a Content-Type of `application/json` as well as `application/text` - // "http://news.domain.com/" - host, err := r.Get("article").URLHost("subdomain", "news") +There's also a way to build only the URL host or path for a route: use the methods `URLHost()` or `URLPath()` instead. 
For the previous route, we would do: - // "/articles/technology/42" - path, err := r.Get("article").URLPath("category", "technology", "id", "42") +```go +// "http://news.domain.com/" +host, err := r.Get("article").URLHost("subdomain", "news") -And if you use subrouters, host and path defined separately can be built -as well: +// "/articles/technology/42" +path, err := r.Get("article").URLPath("category", "technology", "id", "42") +``` - r := mux.NewRouter() - s := r.Host("{subdomain}.domain.com").Subrouter() - s.Path("/articles/{category}/{id:[0-9]+}"). - HandlerFunc(ArticleHandler). - Name("article") +And if you use subrouters, host and path defined separately can be built as well: - // "http://news.domain.com/articles/technology/42" - url, err := r.Get("article").URL("subdomain", "news", - "category", "technology", - "id", "42") +```go +r := mux.NewRouter() +s := r.Host("{subdomain}.domain.com").Subrouter() +s.Path("/articles/{category}/{id:[0-9]+}"). + HandlerFunc(ArticleHandler). + Name("article") + +// "http://news.domain.com/articles/technology/42" +url, err := r.Get("article").URL("subdomain", "news", + "category", "technology", + "id", "42") +``` ## Full Example -Here's a complete, runnable example of a small mux based server: +Here's a complete, runnable example of a small `mux` based server: ```go package main diff --git a/vendor/github.com/gorilla/mux/mux.go b/vendor/github.com/gorilla/mux/mux.go index 68c4ea5d8..aabe9958f 100644 --- a/vendor/github.com/gorilla/mux/mux.go +++ b/vendor/github.com/gorilla/mux/mux.go @@ -59,6 +59,12 @@ func (r *Router) Match(req *http.Request, match *RouteMatch) bool { return true } } + + // Closest match for a router (includes sub-routers) + if r.NotFoundHandler != nil { + match.Handler = r.NotFoundHandler + return true + } return false } @@ -89,10 +95,7 @@ func (r *Router) ServeHTTP(w http.ResponseWriter, req *http.Request) { setCurrentRoute(req, match.Route) } if handler == nil { - handler 
= r.NotFoundHandler - if handler == nil { - handler = http.NotFoundHandler() - } + handler = http.NotFoundHandler() } if !r.KeepContext { defer context.Clear(req) @@ -324,11 +327,15 @@ func CurrentRoute(r *http.Request) *Route { } func setVars(r *http.Request, val interface{}) { - context.Set(r, varsKey, val) + if val != nil { + context.Set(r, varsKey, val) + } } func setCurrentRoute(r *http.Request, val interface{}) { - context.Set(r, routeKey, val) + if val != nil { + context.Set(r, routeKey, val) + } } // ---------------------------------------------------------------------------- diff --git a/vendor/github.com/hashicorp/consul/api/README.md b/vendor/github.com/hashicorp/consul/api/README.md index 6fcdd2033..7e64988f4 100644 --- a/vendor/github.com/hashicorp/consul/api/README.md +++ b/vendor/github.com/hashicorp/consul/api/README.md @@ -9,7 +9,7 @@ Currently, all of the Consul APIs included in version 0.6.0 are supported. Documentation ============= -The full documentation is available on [Godoc](http://godoc.org/github.com/hashicorp/consul/api) +The full documentation is available on [Godoc](https://godoc.org/github.com/hashicorp/consul/api) Usage ===== @@ -41,4 +41,3 @@ if err != nil { fmt.Printf("KV: %v", pair) ``` - diff --git a/vendor/github.com/hashicorp/consul/api/api.go b/vendor/github.com/hashicorp/consul/api/api.go index ad89dc50e..0a2a76e5d 100644 --- a/vendor/github.com/hashicorp/consul/api/api.go +++ b/vendor/github.com/hashicorp/consul/api/api.go @@ -261,9 +261,31 @@ func (r *request) setQueryOptions(q *QueryOptions) { } } -// durToMsec converts a duration to a millisecond specified string +// durToMsec converts a duration to a millisecond specified string. If the +// user selected a positive value that rounds to 0 ms, then we will use 1 ms +// so they get a short delay, otherwise Consul will translate the 0 ms into +// a huge default delay. 
func durToMsec(dur time.Duration) string { - return fmt.Sprintf("%dms", dur/time.Millisecond) + ms := dur / time.Millisecond + if dur > 0 && ms == 0 { + ms = 1 + } + return fmt.Sprintf("%dms", ms) +} + +// serverError is a string we look for to detect 500 errors. +const serverError = "Unexpected response code: 500" + +// IsServerError returns true for 500 errors from the Consul servers, these are +// usually retryable at a later time. +func IsServerError(err error) bool { + if err == nil { + return false + } + + // TODO (slackpad) - Make a real error type here instead of using + // a string check. + return strings.Contains(err.Error(), serverError) } // setWriteOptions is used to annotate the request with diff --git a/vendor/github.com/hashicorp/consul/api/lock.go b/vendor/github.com/hashicorp/consul/api/lock.go index c1f6edf82..08e8e7931 100644 --- a/vendor/github.com/hashicorp/consul/api/lock.go +++ b/vendor/github.com/hashicorp/consul/api/lock.go @@ -2,7 +2,6 @@ package api import ( "fmt" - "strings" "sync" "time" ) @@ -29,7 +28,8 @@ const ( // DefaultMonitorRetryTime is how long we wait after a failed monitor check // of a lock (500 response code). This allows the monitor to ride out brief // periods of unavailability, subject to the MonitorRetries setting in the - // lock options which is by default set to 0, disabling this feature. + // lock options which is by default set to 0, disabling this feature. This + // affects locks and semaphores. DefaultMonitorRetryTime = 2 * time.Second // LockFlagValue is a magic flag we set to indicate a key @@ -56,7 +56,7 @@ var ( ) // Lock is used to implement client-side leader election. It is follows the -// algorithm as described here: https://consul.io/docs/guides/leader-election.html. +// algorithm as described here: https://www.consul.io/docs/guides/leader-election.html. 
type Lock struct { c *Client opts *LockOptions @@ -76,6 +76,8 @@ type LockOptions struct { SessionTTL string // Optional, defaults to DefaultLockSessionTTL MonitorRetries int // Optional, defaults to 0 which means no retries MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + LockWaitTime time.Duration // Optional, defaults to DefaultLockWaitTime + LockTryOnce bool // Optional, defaults to false which means try forever } // LockKey returns a handle to a lock struct which can be used @@ -108,6 +110,9 @@ func (c *Client) LockOpts(opts *LockOptions) (*Lock, error) { if opts.MonitorRetryTime == 0 { opts.MonitorRetryTime = DefaultMonitorRetryTime } + if opts.LockWaitTime == 0 { + opts.LockWaitTime = DefaultLockWaitTime + } l := &Lock{ c: c, opts: opts, @@ -158,9 +163,11 @@ func (l *Lock) Lock(stopCh <-chan struct{}) (<-chan struct{}, error) { // Setup the query options kv := l.c.KV() qOpts := &QueryOptions{ - WaitTime: DefaultLockWaitTime, + WaitTime: l.opts.LockWaitTime, } + start := time.Now() + attempts := 0 WAIT: // Check if we should quit select { @@ -169,6 +176,17 @@ WAIT: default: } + // Handle the one-shot mode. + if l.opts.LockTryOnce && attempts > 0 { + elapsed := time.Now().Sub(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + // Look for an existing lock, blocking until not taken pair, meta, err := kv.Get(l.opts.Key, qOpts) if err != nil { @@ -343,15 +361,11 @@ WAIT: RETRY: pair, meta, err := kv.Get(l.opts.Key, opts) if err != nil { - // TODO (slackpad) - Make a real error type here instead of using - // a string check. - const serverError = "Unexpected response code: 500" - // If configured we can try to ride out a brief Consul unavailability // by doing retries. Note that we have to attempt the retry in a non- // blocking fashion so that we have a clean place to reset the retry // counter if service is restored. 
- if retries > 0 && strings.Contains(err.Error(), serverError) { + if retries > 0 && IsServerError(err) { time.Sleep(l.opts.MonitorRetryTime) retries-- opts.WaitIndex = 0 diff --git a/vendor/github.com/hashicorp/consul/api/semaphore.go b/vendor/github.com/hashicorp/consul/api/semaphore.go index 4e70be2e7..e6645ac1d 100644 --- a/vendor/github.com/hashicorp/consul/api/semaphore.go +++ b/vendor/github.com/hashicorp/consul/api/semaphore.go @@ -63,12 +63,16 @@ type Semaphore struct { // SemaphoreOptions is used to parameterize the Semaphore type SemaphoreOptions struct { - Prefix string // Must be set and have write permissions - Limit int // Must be set, and be positive - Value []byte // Optional, value to associate with the contender entry - Session string // Optional, created if not specified - SessionName string // Optional, defaults to DefaultLockSessionName - SessionTTL string // Optional, defaults to DefaultLockSessionTTL + Prefix string // Must be set and have write permissions + Limit int // Must be set, and be positive + Value []byte // Optional, value to associate with the contender entry + Session string // Optional, created if not specified + SessionName string // Optional, defaults to DefaultLockSessionName + SessionTTL string // Optional, defaults to DefaultLockSessionTTL + MonitorRetries int // Optional, defaults to 0 which means no retries + MonitorRetryTime time.Duration // Optional, defaults to DefaultMonitorRetryTime + SemaphoreWaitTime time.Duration // Optional, defaults to DefaultSemaphoreWaitTime + SemaphoreTryOnce bool // Optional, defaults to false which means try forever } // semaphoreLock is written under the DefaultSemaphoreKey and @@ -115,6 +119,12 @@ func (c *Client) SemaphoreOpts(opts *SemaphoreOptions) (*Semaphore, error) { return nil, fmt.Errorf("invalid SessionTTL: %v", err) } } + if opts.MonitorRetryTime == 0 { + opts.MonitorRetryTime = DefaultMonitorRetryTime + } + if opts.SemaphoreWaitTime == 0 { + 
opts.SemaphoreWaitTime = DefaultSemaphoreWaitTime + } s := &Semaphore{ c: c, opts: opts, @@ -172,9 +182,11 @@ func (s *Semaphore) Acquire(stopCh <-chan struct{}) (<-chan struct{}, error) { // Setup the query options qOpts := &QueryOptions{ - WaitTime: DefaultSemaphoreWaitTime, + WaitTime: s.opts.SemaphoreWaitTime, } + start := time.Now() + attempts := 0 WAIT: // Check if we should quit select { @@ -183,6 +195,17 @@ WAIT: default: } + // Handle the one-shot mode. + if s.opts.SemaphoreTryOnce && attempts > 0 { + elapsed := time.Now().Sub(start) + if elapsed > qOpts.WaitTime { + return nil, nil + } + + qOpts.WaitTime -= elapsed + } + attempts++ + // Read the prefix pairs, meta, err := kv.List(s.opts.Prefix, qOpts) if err != nil { @@ -460,8 +483,20 @@ func (s *Semaphore) monitorLock(session string, stopCh chan struct{}) { kv := s.c.KV() opts := &QueryOptions{RequireConsistent: true} WAIT: + retries := s.opts.MonitorRetries +RETRY: pairs, meta, err := kv.List(s.opts.Prefix, opts) if err != nil { + // If configured we can try to ride out a brief Consul unavailability + // by doing retries. Note that we have to attempt the retry in a non- + // blocking fashion so that we have a clean place to reset the retry + // counter if service is restored. 
+ if retries > 0 && IsServerError(err) { + time.Sleep(s.opts.MonitorRetryTime) + retries-- + opts.WaitIndex = 0 + goto RETRY + } return } lockPair := s.findLock(pairs) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/selinux/selinux.go b/vendor/github.com/opencontainers/runc/libcontainer/selinux/selinux.go index 2771bb50e..88d612cad 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/selinux/selinux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/selinux/selinux.go @@ -231,10 +231,14 @@ func ReserveLabel(scon string) { } } +func selinuxEnforcePath() string { + return fmt.Sprintf("%s/enforce", selinuxPath) +} + func SelinuxGetEnforce() int { var enforce int - enforceS, err := readCon(fmt.Sprintf("%s/enforce", selinuxPath)) + enforceS, err := readCon(selinuxEnforcePath()) if err != nil { return -1 } @@ -246,6 +250,10 @@ func SelinuxGetEnforce() int { return enforce } +func SelinuxSetEnforce(mode int) error { + return writeCon(selinuxEnforcePath(), fmt.Sprintf("%d", mode)) +} + func SelinuxGetEnforceMode() int { switch readConfig(selinuxTag) { case "enforcing": diff --git a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go index 2cc3ef803..6c835e68e 100644 --- a/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go +++ b/vendor/github.com/opencontainers/runc/libcontainer/system/linux.go @@ -3,6 +3,9 @@ package system import ( + "bufio" + "fmt" + "os" "os/exec" "syscall" "unsafe" @@ -75,3 +78,37 @@ func Setctty() error { } return nil } + +/* + * Detect whether we are currently running in a user namespace. 
+ * Copied from github.com/lxc/lxd/shared/util.go + */ +func RunningInUserNS() bool { + file, err := os.Open("/proc/self/uid_map") + if err != nil { + /* + * This kernel-provided file only exists if user namespaces are + * supported + */ + return false + } + defer file.Close() + + buf := bufio.NewReader(file) + l, _, err := buf.ReadLine() + if err != nil { + return false + } + + line := string(l) + var a, b, c int64 + fmt.Sscanf(line, "%d %d %d", &a, &b, &c) + /* + * We assume we are in the initial user namespace if we have a full + * range - 4294967295 uids starting at uid 0. + */ + if a == 0 && b == 0 && c == 4294967295 { + return false + } + return true +} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS b/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS deleted file mode 100644 index edbe20066..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Aleksa Sarai (@cyphar) diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go deleted file mode 100644 index 6f8a982ff..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup.go +++ /dev/null @@ -1,108 +0,0 @@ -package user - -import ( - "errors" - "fmt" - "syscall" -) - -var ( - // The current operating system does not provide the required data for user lookups. - ErrUnsupported = errors.New("user lookup: operating system does not provide passwd-formatted data") -) - -func lookupUser(filter func(u User) bool) (User, error) { - // Get operating system-specific passwd reader-closer. - passwd, err := GetPasswd() - if err != nil { - return User{}, err - } - defer passwd.Close() - - // Get the users. 
- users, err := ParsePasswdFilter(passwd, filter) - if err != nil { - return User{}, err - } - - // No user entries found. - if len(users) == 0 { - return User{}, fmt.Errorf("no matching entries in passwd file") - } - - // Assume the first entry is the "correct" one. - return users[0], nil -} - -// CurrentUser looks up the current user by their user id in /etc/passwd. If the -// user cannot be found (or there is no /etc/passwd file on the filesystem), -// then CurrentUser returns an error. -func CurrentUser() (User, error) { - return LookupUid(syscall.Getuid()) -} - -// LookupUser looks up a user by their username in /etc/passwd. If the user -// cannot be found (or there is no /etc/passwd file on the filesystem), then -// LookupUser returns an error. -func LookupUser(username string) (User, error) { - return lookupUser(func(u User) bool { - return u.Name == username - }) -} - -// LookupUid looks up a user by their user id in /etc/passwd. If the user cannot -// be found (or there is no /etc/passwd file on the filesystem), then LookupId -// returns an error. -func LookupUid(uid int) (User, error) { - return lookupUser(func(u User) bool { - return u.Uid == uid - }) -} - -func lookupGroup(filter func(g Group) bool) (Group, error) { - // Get operating system-specific group reader-closer. - group, err := GetGroup() - if err != nil { - return Group{}, err - } - defer group.Close() - - // Get the users. - groups, err := ParseGroupFilter(group, filter) - if err != nil { - return Group{}, err - } - - // No user entries found. - if len(groups) == 0 { - return Group{}, fmt.Errorf("no matching entries in group file") - } - - // Assume the first entry is the "correct" one. - return groups[0], nil -} - -// CurrentGroup looks up the current user's group by their primary group id's -// entry in /etc/passwd. If the group cannot be found (or there is no -// /etc/group file on the filesystem), then CurrentGroup returns an error. 
-func CurrentGroup() (Group, error) { - return LookupGid(syscall.Getgid()) -} - -// LookupGroup looks up a group by its name in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGroup -// returns an error. -func LookupGroup(groupname string) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Name == groupname - }) -} - -// LookupGid looks up a group by its group id in /etc/group. If the group cannot -// be found (or there is no /etc/group file on the filesystem), then LookupGid -// returns an error. -func LookupGid(gid int) (Group, error) { - return lookupGroup(func(g Group) bool { - return g.Gid == gid - }) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go deleted file mode 100644 index 758b734c2..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build darwin dragonfly freebsd linux netbsd openbsd solaris - -package user - -import ( - "io" - "os" -) - -// Unix-specific path to the passwd and group formatted files. 
-const ( - unixPasswdPath = "/etc/passwd" - unixGroupPath = "/etc/group" -) - -func GetPasswdPath() (string, error) { - return unixPasswdPath, nil -} - -func GetPasswd() (io.ReadCloser, error) { - return os.Open(unixPasswdPath) -} - -func GetGroupPath() (string, error) { - return unixGroupPath, nil -} - -func GetGroup() (io.ReadCloser, error) { - return os.Open(unixGroupPath) -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go b/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go deleted file mode 100644 index 721794887..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris - -package user - -import "io" - -func GetPasswdPath() (string, error) { - return "", ErrUnsupported -} - -func GetPasswd() (io.ReadCloser, error) { - return nil, ErrUnsupported -} - -func GetGroupPath() (string, error) { - return "", ErrUnsupported -} - -func GetGroup() (io.ReadCloser, error) { - return nil, ErrUnsupported -} diff --git a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go b/vendor/github.com/opencontainers/runc/libcontainer/user/user.go deleted file mode 100644 index e6375ea4d..000000000 --- a/vendor/github.com/opencontainers/runc/libcontainer/user/user.go +++ /dev/null @@ -1,418 +0,0 @@ -package user - -import ( - "bufio" - "fmt" - "io" - "os" - "strconv" - "strings" -) - -const ( - minId = 0 - maxId = 1<<31 - 1 //for 32-bit systems compatibility -) - -var ( - ErrRange = fmt.Errorf("Uids and gids must be in range %d-%d", minId, maxId) -) - -type User struct { - Name string - Pass string - Uid int - Gid int - Gecos string - Home string - Shell string -} - -type Group struct { - Name string - Pass string - Gid int - List []string -} - -func parseLine(line string, v ...interface{}) { - if line == "" { - return - 
} - - parts := strings.Split(line, ":") - for i, p := range parts { - if len(v) <= i { - // if we have more "parts" than we have places to put them, bail for great "tolerance" of naughty configuration files - break - } - - switch e := v[i].(type) { - case *string: - // "root", "adm", "/bin/bash" - *e = p - case *int: - // "0", "4", "1000" - // ignore string to int conversion errors, for great "tolerance" of naughty configuration files - *e, _ = strconv.Atoi(p) - case *[]string: - // "", "root", "root,adm,daemon" - if p != "" { - *e = strings.Split(p, ",") - } else { - *e = []string{} - } - default: - // panic, because this is a programming/logic error, not a runtime one - panic("parseLine expects only pointers! argument " + strconv.Itoa(i) + " is not a pointer!") - } - } -} - -func ParsePasswdFile(path string) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswd(passwd) -} - -func ParsePasswd(passwd io.Reader) ([]User, error) { - return ParsePasswdFilter(passwd, nil) -} - -func ParsePasswdFileFilter(path string, filter func(User) bool) ([]User, error) { - passwd, err := os.Open(path) - if err != nil { - return nil, err - } - defer passwd.Close() - return ParsePasswdFilter(passwd, filter) -} - -func ParsePasswdFilter(r io.Reader, filter func(User) bool) ([]User, error) { - if r == nil { - return nil, fmt.Errorf("nil source for passwd-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []User{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" { - continue - } - - // see: man 5 passwd - // name:password:UID:GID:GECOS:directory:shell - // Name:Pass:Uid:Gid:Gecos:Home:Shell - // root:x:0:0:root:/root:/bin/bash - // adm:x:3:4:adm:/var/adm:/bin/false - p := User{} - parseLine( - text, - &p.Name, &p.Pass, &p.Uid, &p.Gid, &p.Gecos, &p.Home, &p.Shell, - ) - - if filter == nil || filter(p) { - out = 
append(out, p) - } - } - - return out, nil -} - -func ParseGroupFile(path string) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroup(group) -} - -func ParseGroup(group io.Reader) ([]Group, error) { - return ParseGroupFilter(group, nil) -} - -func ParseGroupFileFilter(path string, filter func(Group) bool) ([]Group, error) { - group, err := os.Open(path) - if err != nil { - return nil, err - } - defer group.Close() - return ParseGroupFilter(group, filter) -} - -func ParseGroupFilter(r io.Reader, filter func(Group) bool) ([]Group, error) { - if r == nil { - return nil, fmt.Errorf("nil source for group-formatted data") - } - - var ( - s = bufio.NewScanner(r) - out = []Group{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - text := s.Text() - if text == "" { - continue - } - - // see: man 5 group - // group_name:password:GID:user_list - // Name:Pass:Gid:List - // root:x:0:root - // adm:x:4:root,adm,daemon - p := Group{} - parseLine( - text, - &p.Name, &p.Pass, &p.Gid, &p.List, - ) - - if filter == nil || filter(p) { - out = append(out, p) - } - } - - return out, nil -} - -type ExecUser struct { - Uid, Gid int - Sgids []int - Home string -} - -// GetExecUserPath is a wrapper for GetExecUser. It reads data from each of the -// given file paths and uses that data as the arguments to GetExecUser. If the -// files cannot be opened for any reason, the error is ignored and a nil -// io.Reader is passed instead. 
-func GetExecUserPath(userSpec string, defaults *ExecUser, passwdPath, groupPath string) (*ExecUser, error) { - passwd, err := os.Open(passwdPath) - if err != nil { - passwd = nil - } else { - defer passwd.Close() - } - - group, err := os.Open(groupPath) - if err != nil { - group = nil - } else { - defer group.Close() - } - - return GetExecUser(userSpec, defaults, passwd, group) -} - -// GetExecUser parses a user specification string (using the passwd and group -// readers as sources for /etc/passwd and /etc/group data, respectively). In -// the case of blank fields or missing data from the sources, the values in -// defaults is used. -// -// GetExecUser will return an error if a user or group literal could not be -// found in any entry in passwd and group respectively. -// -// Examples of valid user specifications are: -// * "" -// * "user" -// * "uid" -// * "user:group" -// * "uid:gid -// * "user:gid" -// * "uid:group" -func GetExecUser(userSpec string, defaults *ExecUser, passwd, group io.Reader) (*ExecUser, error) { - var ( - userArg, groupArg string - name string - ) - - if defaults == nil { - defaults = new(ExecUser) - } - - // Copy over defaults. - user := &ExecUser{ - Uid: defaults.Uid, - Gid: defaults.Gid, - Sgids: defaults.Sgids, - Home: defaults.Home, - } - - // Sgids slice *cannot* be nil. 
- if user.Sgids == nil { - user.Sgids = []int{} - } - - // allow for userArg to have either "user" syntax, or optionally "user:group" syntax - parseLine(userSpec, &userArg, &groupArg) - - users, err := ParsePasswdFilter(passwd, func(u User) bool { - if userArg == "" { - return u.Uid == user.Uid - } - return u.Name == userArg || strconv.Itoa(u.Uid) == userArg - }) - if err != nil && passwd != nil { - if userArg == "" { - userArg = strconv.Itoa(user.Uid) - } - return nil, fmt.Errorf("Unable to find user %v: %v", userArg, err) - } - - haveUser := users != nil && len(users) > 0 - if haveUser { - // if we found any user entries that matched our filter, let's take the first one as "correct" - name = users[0].Name - user.Uid = users[0].Uid - user.Gid = users[0].Gid - user.Home = users[0].Home - } else if userArg != "" { - // we asked for a user but didn't find them... let's check to see if we wanted a numeric user - user.Uid, err = strconv.Atoi(userArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find user %v", userArg) - } - - // Must be inside valid uid range. - if user.Uid < minId || user.Uid > maxId { - return nil, ErrRange - } - - // if userArg couldn't be found in /etc/passwd but is numeric, just roll with it - this is legit - } - - if groupArg != "" || name != "" { - groups, err := ParseGroupFilter(group, func(g Group) bool { - // Explicit group format takes precedence. - if groupArg != "" { - return g.Name == groupArg || strconv.Itoa(g.Gid) == groupArg - } - - // Check if user is a member. 
- for _, u := range g.List { - if u == name { - return true - } - } - - return false - }) - if err != nil && group != nil { - return nil, fmt.Errorf("Unable to find groups for user %v: %v", users[0].Name, err) - } - - haveGroup := groups != nil && len(groups) > 0 - if groupArg != "" { - if haveGroup { - // if we found any group entries that matched our filter, let's take the first one as "correct" - user.Gid = groups[0].Gid - } else { - // we asked for a group but didn't find id... let's check to see if we wanted a numeric group - user.Gid, err = strconv.Atoi(groupArg) - if err != nil { - // not numeric - we have to bail - return nil, fmt.Errorf("Unable to find group %v", groupArg) - } - - // Ensure gid is inside gid range. - if user.Gid < minId || user.Gid > maxId { - return nil, ErrRange - } - - // if groupArg couldn't be found in /etc/group but is numeric, just roll with it - this is legit - } - } else if haveGroup { - // If implicit group format, fill supplementary gids. - user.Sgids = make([]int, len(groups)) - for i, group := range groups { - user.Sgids[i] = group.Gid - } - } - } - - return user, nil -} - -// GetAdditionalGroups looks up a list of groups by name or group id -// against the given /etc/group formatted data. If a group name cannot -// be found, an error will be returned. If a group id cannot be found, -// or the given group data is nil, the id will be returned as-is -// provided it is in the legal range. 
-func GetAdditionalGroups(additionalGroups []string, group io.Reader) ([]int, error) { - var groups = []Group{} - if group != nil { - var err error - groups, err = ParseGroupFilter(group, func(g Group) bool { - for _, ag := range additionalGroups { - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - return true - } - } - return false - }) - if err != nil { - return nil, fmt.Errorf("Unable to find additional groups %v: %v", additionalGroups, err) - } - } - - gidMap := make(map[int]struct{}) - for _, ag := range additionalGroups { - var found bool - for _, g := range groups { - // if we found a matched group either by name or gid, take the - // first matched as correct - if g.Name == ag || strconv.Itoa(g.Gid) == ag { - if _, ok := gidMap[g.Gid]; !ok { - gidMap[g.Gid] = struct{}{} - found = true - break - } - } - } - // we asked for a group but didn't find it. let's check to see - // if we wanted a numeric group - if !found { - gid, err := strconv.Atoi(ag) - if err != nil { - return nil, fmt.Errorf("Unable to find group %s", ag) - } - // Ensure gid is inside gid range. - if gid < minId || gid > maxId { - return nil, ErrRange - } - gidMap[gid] = struct{}{} - } - } - gids := []int{} - for gid := range gidMap { - gids = append(gids, gid) - } - return gids, nil -} - -// GetAdditionalGroupsPath is a wrapper around GetAdditionalGroups -// that opens the groupPath given and gives it as an argument to -// GetAdditionalGroups. 
-func GetAdditionalGroupsPath(additionalGroups []string, groupPath string) ([]int, error) { - group, err := os.Open(groupPath) - if err == nil { - defer group.Close() - } - return GetAdditionalGroups(additionalGroups, group) -} diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go index e5005daaa..003e99fad 100644 --- a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -585,13 +585,15 @@ func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { if len(diff.ToDate) > 0 { toDate = "\t" + diff.ToDate } - err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) - if err != nil { - return err - } - err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) - if err != nil { - return err + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } } } first, last := g[0], g[len(g)-1] @@ -710,8 +712,10 @@ func WriteContextDiff(writer io.Writer, diff ContextDiff) error { if len(diff.ToDate) > 0 { toDate = "\t" + diff.ToDate } - wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) - wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + if diff.FromFile != "" || diff.ToFile != "" { + wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol) + wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol) + } } first, last := g[0], g[len(g)-1] diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go b/vendor/github.com/stretchr/testify/assert/assertion_forward.go new file mode 100644 index 000000000..e6a796046 --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go @@ -0,0 +1,387 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST 
NOT BE EDITED BY HAND +*/ + +package assert + +import ( + + http "net/http" + url "net/url" + time "time" +) + + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { + return Condition(a.t, comp, msgAndArgs...) +} + + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return Contains(a.t, s, contains, msgAndArgs...) +} + + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// a.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { + return Empty(a.t, object, msgAndArgs...) +} + + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Equal(a.t, expected, actual, msgAndArgs...) +} + + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { + return EqualError(a.t, theError, errString, msgAndArgs...) +} + + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return EqualValues(a.t, expected, actual, msgAndArgs...) +} + + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { + return Error(a.t, err, msgAndArgs...) +} + + +// Exactly asserts that two objects are equal is value and type. +// +// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return Exactly(a.t, expected, actual, msgAndArgs...) +} + + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { + return Fail(a.t, failureMessage, msgAndArgs...) +} + + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) bool { + return FailNow(a.t, failureMessage, msgAndArgs...) +} + + +// False asserts that the specified value is false. +// +// a.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { + return False(a.t, value, msgAndArgs...) +} + + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { + return HTTPBodyContains(a.t, handler, method, url, values, str) +} + + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) bool { + return HTTPBodyNotContains(a.t, handler, method, url, values, str) +} + + +// HTTPError asserts that a specified handler returns an error status code. +// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPError(a.t, handler, method, url, values) +} + + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPRedirect(a.t, handler, method, url, values) +} + + +// HTTPSuccess asserts that a specified handler returns a success status code. 
+// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) bool { + return HTTPSuccess(a.t, handler, method, url, values) +} + + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return Implements(a.t, interfaceObject, object, msgAndArgs...) +} + + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { + return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { + return InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) 
+} + + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { + return IsType(a.t, expectedType, object, msgAndArgs...) +} + + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { + return JSONEq(a.t, expected, actual, msgAndArgs...) +} + + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { + return Len(a.t, object, length, msgAndArgs...) +} + + +// Nil asserts that the specified object is nil. +// +// a.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { + return Nil(a.t, object, msgAndArgs...) +} + + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) bool { + return NoError(a.t, err, msgAndArgs...) +} + + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. 
+// +// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) bool { + return NotContains(a.t, s, contains, msgAndArgs...) +} + + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { + return NotEmpty(a.t, object, msgAndArgs...) +} + + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return NotEqual(a.t, expected, actual, msgAndArgs...) +} + + +// NotNil asserts that the specified object is not nil. +// +// a.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { + return NotNil(a.t, object, msgAndArgs...) +} + + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return NotPanics(a.t, f, msgAndArgs...) +} + + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return NotRegexp(a.t, rx, str, msgAndArgs...) +} + + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { + return NotZero(a.t, i, msgAndArgs...) +} + + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { + return Panics(a.t, f, msgAndArgs...) +} + + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { + return Regexp(a.t, rx, str, msgAndArgs...) +} + + +// True asserts that the specified value is true. +// +// a.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { + return True(a.t, value, msgAndArgs...) +} + + +// WithinDuration asserts that the two times are within duration delta of each other. 
+// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { + return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + + +// Zero asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { + return Zero(a.t, i, msgAndArgs...) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl new file mode 100644 index 000000000..99f9acfbb --- /dev/null +++ b/vendor/github.com/stretchr/testify/assert/assertion_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) bool { + return {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/assert/assertions.go b/vendor/github.com/stretchr/testify/assert/assertions.go index e95357d29..d7c16c590 100644 --- a/vendor/github.com/stretchr/testify/assert/assertions.go +++ b/vendor/github.com/stretchr/testify/assert/assertions.go @@ -181,6 +181,28 @@ func indentMessageLines(message string, tabs int) string { return outBuf.String() } +type failNower interface { + FailNow() +} + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { + Fail(t, failureMessage, msgAndArgs...) + + // We cannot extend TestingT with FailNow() and + // maintain backwards compatibility, so we fallback + // to panicking when FailNow is not available in + // TestingT. 
+ // See issue #263 + + if t, ok := t.(failNower); ok { + t.FailNow() + } else { + panic("test failed and t is missing `FailNow()`") + } + return false +} + // Fail reports a failure through func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) bool { @@ -363,6 +385,11 @@ func isEmpty(object interface{}) bool { { return (objValue.Len() == 0) } + case reflect.Struct: + switch object.(type) { + case time.Time: + return object.(time.Time).IsZero() + } case reflect.Ptr: { if objValue.IsNil() { @@ -739,42 +766,40 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn return true } -// min(|expected|, |actual|) * epsilon -func calcEpsilonDelta(expected, actual interface{}, epsilon float64) float64 { +func calcRelativeError(expected, actual interface{}) (float64, error) { af, aok := toFloat(expected) - bf, bok := toFloat(actual) - - if !aok || !bok { - // invalid input - return 0 + if !aok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", expected) } - - if af < 0 { - af = -af + if af == 0 { + return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error") } - if bf < 0 { - bf = -bf - } - var delta float64 - if af < bf { - delta = af * epsilon - } else { - delta = bf * epsilon + bf, bok := toFloat(actual) + if !bok { + return 0, fmt.Errorf("expected value %q cannot be converted to float", actual) } - return delta + + return math.Abs(af-bf) / math.Abs(af), nil } // InEpsilon asserts that expected and actual have a relative error less than epsilon // // Returns whether the assertion was successful (true) or not (false). func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - delta := calcEpsilonDelta(expected, actual, epsilon) + actualEpsilon, err := calcRelativeError(expected, actual) + if err != nil { + return Fail(t, err.Error(), msgAndArgs...) 
+ } + if actualEpsilon > epsilon { + return Fail(t, fmt.Sprintf("Relative error is too high: %#v (expected)\n"+ + " < %#v (actual)", actualEpsilon, epsilon), msgAndArgs...) + } - return InDelta(t, expected, actual, delta, msgAndArgs...) + return true } -// InEpsilonSlice is the same as InEpsilon, except it compares two slices. -func InEpsilonSlice(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { +// InEpsilonSlice is the same as InEpsilon, except it compares each value from two slices. +func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { if expected == nil || actual == nil || reflect.TypeOf(actual).Kind() != reflect.Slice || reflect.TypeOf(expected).Kind() != reflect.Slice { @@ -785,7 +810,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, delta float64, msg expectedSlice := reflect.ValueOf(expected) for i := 0; i < actualSlice.Len(); i++ { - result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), delta) + result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) if !result { return result } diff --git a/vendor/github.com/stretchr/testify/assert/forward_assertions.go b/vendor/github.com/stretchr/testify/assert/forward_assertions.go index fe6b664e0..b867e95ea 100644 --- a/vendor/github.com/stretchr/testify/assert/forward_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/forward_assertions.go @@ -1,7 +1,5 @@ package assert -import "time" - // Assertions provides assertion methods around the // TestingT interface. type Assertions struct { @@ -15,270 +13,4 @@ func New(t TestingT) *Assertions { } } -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) bool { - return Fail(a.t, failureMessage, msgAndArgs...) 
-} - -// Implements asserts that an object is implemented by the specified interface. -// -// assert.Implements((*MyInterface)(nil), new(MyObject), "MyObject") -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { - return IsType(a.t, expectedType, object, msgAndArgs...) -} - -// Equal asserts that two objects are equal. -// -// assert.Equal(123, 123, "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return Equal(a.t, expected, actual, msgAndArgs...) -} - -// EqualValues asserts that two objects are equal or convertable to the same types -// and equal. -// -// assert.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualValues(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return EqualValues(a.t, expected, actual, msgAndArgs...) -} - -// Exactly asserts that two objects are equal is value and type. -// -// assert.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return Exactly(a.t, expected, actual, msgAndArgs...) -} - -// NotNil asserts that the specified object is not nil. -// -// assert.NotNil(err, "err should be something") -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) bool { - return NotNil(a.t, object, msgAndArgs...) -} - -// Nil asserts that the specified object is nil. -// -// assert.Nil(err, "err should be nothing") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) bool { - return Nil(a.t, object, msgAndArgs...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a -// slice with len == 0. -// -// assert.Empty(obj) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { - return Empty(a.t, object, msgAndArgs...) -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a -// slice with len == 0. -// -// if assert.NotEmpty(obj) { -// assert.Equal("two", obj[1]) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) bool { - return NotEmpty(a.t, object, msgAndArgs...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// assert.Len(mySlice, 3, "The size of slice is not 3") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) bool { - return Len(a.t, object, length, msgAndArgs...) -} - -// True asserts that the specified value is true. -// -// assert.True(myBool, "myBool should be true") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) bool { - return True(a.t, value, msgAndArgs...) -} - -// False asserts that the specified value is false. 
-// -// assert.False(myBool, "myBool should be false") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) bool { - return False(a.t, value, msgAndArgs...) -} - -// NotEqual asserts that the specified values are NOT equal. -// -// assert.NotEqual(obj1, obj2, "two objects shouldn't be equal") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) bool { - return NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// Contains asserts that the specified string contains the specified substring. -// -// assert.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) bool { - return Contains(a.t, s, contains, msgAndArgs...) -} - -// NotContains asserts that the specified string does NOT contain the specified substring. -// -// assert.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) bool { - return NotContains(a.t, s, contains, msgAndArgs...) -} - -// Condition uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp Comparison, msgAndArgs ...interface{}) bool { - return Condition(a.t, comp, msgAndArgs...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// assert.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Panics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return Panics(a.t, f, msgAndArgs...) 
-} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// assert.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotPanics(f PanicTestFunc, msgAndArgs ...interface{}) bool { - return NotPanics(a.t, f, msgAndArgs...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// assert.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) bool { - return WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) bool { - return InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) bool { - return InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if assert.NoError(err) { -// assert.Equal(actualObj, expectedObj) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) bool { - return NoError(a.t, theError, msgAndArgs...) 
-} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if assert.Error(err, "An error was expected") { -// assert.Equal(err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) bool { - return Error(a.t, theError, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if assert.Error(err, "An error was expected") { -// assert.Equal(err, expectedError) -// } -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) bool { - return EqualError(a.t, theError, errString, msgAndArgs...) -} - -// Regexp asserts that a specified regexp matches a string. -// -// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") -// assert.Regexp(t, "start...$", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return Regexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// assert.NotRegexp(t, "^start", "it's not starting") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) bool { - return NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// Zero asserts that i is the zero value for its type and returns the truth. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) bool { - return Zero(a.t, i, msgAndArgs...) 
-} - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) bool { - return NotZero(a.t, i, msgAndArgs...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) bool { - return JSONEq(a.t, expected, actual, msgAndArgs...) -} +//go:generate go run ../_codegen/main.go -output-package=assert -template=assertion_forward.go.tmpl diff --git a/vendor/github.com/stretchr/testify/assert/http_assertions.go b/vendor/github.com/stretchr/testify/assert/http_assertions.go index 437a86ce4..e1b9442b5 100644 --- a/vendor/github.com/stretchr/testify/assert/http_assertions.go +++ b/vendor/github.com/stretchr/testify/assert/http_assertions.go @@ -104,54 +104,3 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin return !contains } - -// -// Assertions Wrappers -// - -// HTTPSuccess asserts that a specified handler returns a success status code. -// -// assert.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method, url string, values url.Values) bool { - return HTTPSuccess(a.t, handler, method, url, values) -} - -// HTTPRedirect asserts that a specified handler returns a redirect status code. -// -// assert.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). 
-func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method, url string, values url.Values) bool { - return HTTPRedirect(a.t, handler, method, url, values) -} - -// HTTPError asserts that a specified handler returns an error status code. -// -// assert.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPError(handler http.HandlerFunc, method, url string, values url.Values) bool { - return HTTPError(a.t, handler, method, url, values) -} - -// HTTPBodyContains asserts that a specified handler returns a -// body that contains a string. -// -// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { - return HTTPBodyContains(a.t, handler, method, url, values, str) -} - -// HTTPBodyNotContains asserts that a specified handler returns a -// body that does not contain a string. -// -// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method, url string, values url.Values, str interface{}) bool { - return HTTPBodyNotContains(a.t, handler, method, url, values, str) -} diff --git a/vendor/github.com/stretchr/testify/require/doc.go b/vendor/github.com/stretchr/testify/require/doc.go index bc1614704..169de3922 100644 --- a/vendor/github.com/stretchr/testify/require/doc.go +++ b/vendor/github.com/stretchr/testify/require/doc.go @@ -1,4 +1,5 @@ -// Alternative testing tools which stop test execution if test failed. 
+// Package require implements the same assertions as the `assert` package but +// stops test execution when a test fails. // // Example Usage // diff --git a/vendor/github.com/stretchr/testify/require/forward_requirements.go b/vendor/github.com/stretchr/testify/require/forward_requirements.go index ee84abc34..d3c2ab9bc 100644 --- a/vendor/github.com/stretchr/testify/require/forward_requirements.go +++ b/vendor/github.com/stretchr/testify/require/forward_requirements.go @@ -1,230 +1,16 @@ package require -import ( - "time" - - "github.com/stretchr/testify/assert" -) - +// Assertions provides assertion methods around the +// TestingT interface. type Assertions struct { t TestingT } +// New makes a new Assertions object for the specified TestingT. func New(t TestingT) *Assertions { return &Assertions{ t: t, } } -// Fail reports a failure through -func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { - FailNow(a.t, failureMessage, msgAndArgs...) -} - -// Implements asserts that an object is implemented by the specified interface. - -func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { - Implements(a.t, interfaceObject, object, msgAndArgs...) -} - -// IsType asserts that the specified objects are of the same type. -func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { - IsType(a.t, expectedType, object, msgAndArgs...) -} - -// Equal asserts that two objects are equal. -// -// require.Equal(123, 123, "123 and 123 should be equal") -func (a *Assertions) Equal(expected, actual interface{}, msgAndArgs ...interface{}) { - Equal(a.t, expected, actual, msgAndArgs...) -} - -// Exactly asserts that two objects are equal is value and type. 
-// -// require.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") -func (a *Assertions) Exactly(expected, actual interface{}, msgAndArgs ...interface{}) { - Exactly(a.t, expected, actual, msgAndArgs...) -} - -// NotNil asserts that the specified object is not nil. -// -// require.NotNil(err, "err should be something") -func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { - NotNil(a.t, object, msgAndArgs...) -} - -// Nil asserts that the specified object is nil. -// -// require.Nil(err, "err should be nothing") -func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { - Nil(a.t, object, msgAndArgs...) -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or a -// slice with len == 0. -// -// require.Empty(obj) -func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { - Empty(a.t, object, msgAndArgs...) -} - -// Empty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or a -// slice with len == 0. -// -// if require.NotEmpty(obj) { -// require.Equal("two", obj[1]) -// } -func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { - NotEmpty(a.t, object, msgAndArgs...) -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// require.Len(mySlice, 3, "The size of slice is not 3") -func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { - Len(a.t, object, length, msgAndArgs...) -} - -// True asserts that the specified value is true. -// -// require.True(myBool, "myBool should be true") -func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { - True(a.t, value, msgAndArgs...) -} - -// False asserts that the specified value is false. -// -// require.False(myBool, "myBool should be false") -func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { - False(a.t, value, msgAndArgs...) 
-} - -// NotEqual asserts that the specified values are NOT equal. -// -// require.NotEqual(obj1, obj2, "two objects shouldn't be equal") -func (a *Assertions) NotEqual(expected, actual interface{}, msgAndArgs ...interface{}) { - NotEqual(a.t, expected, actual, msgAndArgs...) -} - -// Contains asserts that the specified string contains the specified substring. -// -// require.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") -func (a *Assertions) Contains(s, contains interface{}, msgAndArgs ...interface{}) { - Contains(a.t, s, contains, msgAndArgs...) -} - -// NotContains asserts that the specified string does NOT contain the specified substring. -// -// require.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -func (a *Assertions) NotContains(s, contains interface{}, msgAndArgs ...interface{}) { - NotContains(a.t, s, contains, msgAndArgs...) -} - -// Uses a Comparison to assert a complex condition. -func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { - Condition(a.t, comp, msgAndArgs...) -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// require.Panics(func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { - Panics(a.t, f, msgAndArgs...) -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. -// -// require.NotPanics(func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { - NotPanics(a.t, f, msgAndArgs...) -} - -// WithinDuration asserts that the two times are within duration delta of each other. 
-// -// require.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -func (a *Assertions) WithinDuration(expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { - WithinDuration(a.t, expected, actual, delta, msgAndArgs...) -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// require.InDelta(t, math.Pi, (22 / 7.0), 0.01) -func (a *Assertions) InDelta(expected, actual interface{}, delta float64, msgAndArgs ...interface{}) { - InDelta(a.t, expected, actual, delta, msgAndArgs...) -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func (a *Assertions) InEpsilon(expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) -} - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// if require.NoError(err) { -// require.Equal(actualObj, expectedObj) -// } -func (a *Assertions) NoError(theError error, msgAndArgs ...interface{}) { - NoError(a.t, theError, msgAndArgs...) -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// if require.Error(err, "An error was expected") { -// require.Equal(err, expectedError) -// } -func (a *Assertions) Error(theError error, msgAndArgs ...interface{}) { - Error(a.t, theError, msgAndArgs...) -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// if require.Error(err, "An error was expected") { -// require.Equal(err, expectedError) -// } -func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { - EqualError(a.t, theError, errString, msgAndArgs...) -} - -// Regexp asserts that a specified regexp matches a string. 
-// -// require.Regexp(t, regexp.MustCompile("start"), "it's starting") -// require.Regexp(t, "start...$", "it's not starting") -func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { - Regexp(a.t, rx, str, msgAndArgs...) -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// require.NotRegexp(t, "^start", "it's not starting") -func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { - NotRegexp(a.t, rx, str, msgAndArgs...) -} - -// Zero asserts that i is the zero value for its type and returns the truth. -func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { - Zero(a.t, i, msgAndArgs...) -} - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { - NotZero(a.t, i, msgAndArgs...) -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { - JSONEq(a.t, expected, actual, msgAndArgs...) 
-} +//go:generate go run ../_codegen/main.go -output-package=require -template=require_forward.go.tmpl diff --git a/vendor/github.com/stretchr/testify/require/require.go b/vendor/github.com/stretchr/testify/require/require.go new file mode 100644 index 000000000..1bcfcb0d9 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go @@ -0,0 +1,464 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package require + +import ( + + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + + +// Condition uses a Comparison to assert a complex condition. +func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { + if !assert.Condition(t, comp, msgAndArgs...) { + t.FailNow() + } +} + + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// assert.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") +// assert.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// assert.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func Contains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.Contains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// assert.Empty(t, obj) +// +// Returns whether the assertion was successful (true) or not (false). +func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Empty(t, object, msgAndArgs...) { + t.FailNow() + } +} + + +// Equal asserts that two objects are equal. 
+// +// assert.Equal(t, 123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func Equal(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Equal(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { + if !assert.EqualError(t, theError, errString, msgAndArgs...) { + t.FailNow() + } +} + + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// assert.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.EqualValues(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func Error(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.Error(t, err, msgAndArgs...) { + t.FailNow() + } +} + + +// Exactly asserts that two objects are equal is value and type. +// +// assert.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). 
+func Exactly(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.Exactly(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// Fail reports a failure through +func Fail(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.Fail(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + + +// FailNow fails test +func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { + if !assert.FailNow(t, failureMessage, msgAndArgs...) { + t.FailNow() + } +} + + +// False asserts that the specified value is false. +// +// assert.False(t, myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func False(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.False(t, value, msgAndArgs...) { + t.FailNow() + } +} + + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// assert.HTTPBodyContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + if !assert.HTTPBodyContains(t, handler, method, url, values, str) { + t.FailNow() + } +} + + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// assert.HTTPBodyNotContains(t, myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + if !assert.HTTPBodyNotContains(t, handler, method, url, values, str) { + t.FailNow() + } +} + + +// HTTPError asserts that a specified handler returns an error status code. 
+// +// assert.HTTPError(t, myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPError(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPError(t, handler, method, url, values) { + t.FailNow() + } +} + + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// assert.HTTPRedirect(t, myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPRedirect(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPRedirect(t, handler, method, url, values) { + t.FailNow() + } +} + + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// assert.HTTPSuccess(t, myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func HTTPSuccess(t TestingT, handler http.HandlerFunc, method string, url string, values url.Values) { + if !assert.HTTPSuccess(t, handler, method, url, values) { + t.FailNow() + } +} + + +// Implements asserts that an object is implemented by the specified interface. +// +// assert.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") +func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { + t.FailNow() + } +} + + +// InDelta asserts that the two numerals are within delta of each other. +// +// assert.InDelta(t, math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func InDelta(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) 
{ + t.FailNow() + } +} + + +// InDeltaSlice is the same as InDelta, except it compares two slices. +func InDeltaSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InDeltaSlice(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func InEpsilon(t TestingT, expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { + t.FailNow() + } +} + + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func InEpsilonSlice(t TestingT, expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + if !assert.InEpsilonSlice(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + + +// IsType asserts that the specified objects are of the same type. +func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + if !assert.IsType(t, expectedType, object, msgAndArgs...) { + t.FailNow() + } +} + + +// JSONEq asserts that two JSON strings are equivalent. +// +// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { + if !assert.JSONEq(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// assert.Len(t, mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). 
+func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { + if !assert.Len(t, object, length, msgAndArgs...) { + t.FailNow() + } +} + + +// Nil asserts that the specified object is nil. +// +// assert.Nil(t, err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). +func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.Nil(t, object, msgAndArgs...) { + t.FailNow() + } +} + + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if assert.NoError(t, err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func NoError(t TestingT, err error, msgAndArgs ...interface{}) { + if !assert.NoError(t, err, msgAndArgs...) { + t.FailNow() + } +} + + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// assert.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// assert.NotContains(t, ["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// assert.NotContains(t, {"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func NotContains(t TestingT, s interface{}, contains interface{}, msgAndArgs ...interface{}) { + if !assert.NotContains(t, s, contains, msgAndArgs...) { + t.FailNow() + } +} + + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if assert.NotEmpty(t, obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). 
+func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotEmpty(t, object, msgAndArgs...) { + t.FailNow() + } +} + + +// NotEqual asserts that the specified values are NOT equal. +// +// assert.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func NotEqual(t TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + if !assert.NotEqual(t, expected, actual, msgAndArgs...) { + t.FailNow() + } +} + + +// NotNil asserts that the specified object is not nil. +// +// assert.NotNil(t, err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { + if !assert.NotNil(t, object, msgAndArgs...) { + t.FailNow() + } +} + + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// assert.NotPanics(t, func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.NotPanics(t, f, msgAndArgs...) { + t.FailNow() + } +} + + +// NotRegexp asserts that a specified regexp does not match a string. +// +// assert.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") +// assert.NotRegexp(t, "^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.NotRegexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.NotZero(t, i, msgAndArgs...) 
{ + t.FailNow() + } +} + + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// assert.Panics(t, func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { + if !assert.Panics(t, f, msgAndArgs...) { + t.FailNow() + } +} + + +// Regexp asserts that a specified regexp matches a string. +// +// assert.Regexp(t, regexp.MustCompile("start"), "it's starting") +// assert.Regexp(t, "start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { + if !assert.Regexp(t, rx, str, msgAndArgs...) { + t.FailNow() + } +} + + +// True asserts that the specified value is true. +// +// assert.True(t, myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func True(t TestingT, value bool, msgAndArgs ...interface{}) { + if !assert.True(t, value, msgAndArgs...) { + t.FailNow() + } +} + + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// assert.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func WithinDuration(t TestingT, expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { + t.FailNow() + } +} + + +// Zero asserts that i is the zero value for its type and returns the truth. +func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { + if !assert.Zero(t, i, msgAndArgs...) 
{ + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require.go.tmpl b/vendor/github.com/stretchr/testify/require/require.go.tmpl new file mode 100644 index 000000000..ab1b1e9fd --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require.go.tmpl @@ -0,0 +1,6 @@ +{{.Comment}} +func {{.DocInfo.Name}}(t TestingT, {{.Params}}) { + if !assert.{{.DocInfo.Name}}(t, {{.ForwardedParams}}) { + t.FailNow() + } +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go b/vendor/github.com/stretchr/testify/require/require_forward.go new file mode 100644 index 000000000..58324f105 --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go @@ -0,0 +1,388 @@ +/* +* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen +* THIS FILE MUST NOT BE EDITED BY HAND +*/ + +package require + +import ( + + assert "github.com/stretchr/testify/assert" + http "net/http" + url "net/url" + time "time" +) + + +// Condition uses a Comparison to assert a complex condition. +func (a *Assertions) Condition(comp assert.Comparison, msgAndArgs ...interface{}) { + Condition(a.t, comp, msgAndArgs...) +} + + +// Contains asserts that the specified string, list(array, slice...) or map contains the +// specified substring or element. +// +// a.Contains("Hello World", "World", "But 'Hello World' does contain 'World'") +// a.Contains(["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") +// a.Contains({"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Contains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + Contains(a.t, s, contains, msgAndArgs...) +} + + +// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either +// a slice or a channel with len == 0. 
+// +// a.Empty(obj) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { + Empty(a.t, object, msgAndArgs...) +} + + +// Equal asserts that two objects are equal. +// +// a.Equal(123, 123, "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Equal(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Equal(a.t, expected, actual, msgAndArgs...) +} + + +// EqualError asserts that a function returned an error (i.e. not `nil`) +// and that it is equal to the provided error. +// +// actualObj, err := SomeFunction() +// if assert.Error(t, err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualError(theError error, errString string, msgAndArgs ...interface{}) { + EqualError(a.t, theError, errString, msgAndArgs...) +} + + +// EqualValues asserts that two objects are equal or convertable to the same types +// and equal. +// +// a.EqualValues(uint32(123), int32(123), "123 and 123 should be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + EqualValues(a.t, expected, actual, msgAndArgs...) +} + + +// Error asserts that a function returned an error (i.e. not `nil`). +// +// actualObj, err := SomeFunction() +// if a.Error(err, "An error was expected") { +// assert.Equal(t, err, expectedError) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { + Error(a.t, err, msgAndArgs...) +} + + +// Exactly asserts that two objects are equal is value and type. 
+// +// a.Exactly(int32(123), int64(123), "123 and 123 should NOT be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Exactly(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + Exactly(a.t, expected, actual, msgAndArgs...) +} + + +// Fail reports a failure through +func (a *Assertions) Fail(failureMessage string, msgAndArgs ...interface{}) { + Fail(a.t, failureMessage, msgAndArgs...) +} + + +// FailNow fails test +func (a *Assertions) FailNow(failureMessage string, msgAndArgs ...interface{}) { + FailNow(a.t, failureMessage, msgAndArgs...) +} + + +// False asserts that the specified value is false. +// +// a.False(myBool, "myBool should be false") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) False(value bool, msgAndArgs ...interface{}) { + False(a.t, value, msgAndArgs...) +} + + +// HTTPBodyContains asserts that a specified handler returns a +// body that contains a string. +// +// a.HTTPBodyContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + HTTPBodyContains(a.t, handler, method, url, values, str) +} + + +// HTTPBodyNotContains asserts that a specified handler returns a +// body that does not contain a string. +// +// a.HTTPBodyNotContains(myHandler, "www.google.com", nil, "I'm Feeling Lucky") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPBodyNotContains(handler http.HandlerFunc, method string, url string, values url.Values, str interface{}) { + HTTPBodyNotContains(a.t, handler, method, url, values, str) +} + + +// HTTPError asserts that a specified handler returns an error status code. 
+// +// a.HTTPError(myHandler, "POST", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPError(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPError(a.t, handler, method, url, values) +} + + +// HTTPRedirect asserts that a specified handler returns a redirect status code. +// +// a.HTTPRedirect(myHandler, "GET", "/a/b/c", url.Values{"a": []string{"b", "c"}} +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPRedirect(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPRedirect(a.t, handler, method, url, values) +} + + +// HTTPSuccess asserts that a specified handler returns a success status code. +// +// a.HTTPSuccess(myHandler, "POST", "http://www.google.com", nil) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) HTTPSuccess(handler http.HandlerFunc, method string, url string, values url.Values) { + HTTPSuccess(a.t, handler, method, url, values) +} + + +// Implements asserts that an object is implemented by the specified interface. +// +// a.Implements((*MyInterface)(nil), new(MyObject), "MyObject") +func (a *Assertions) Implements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { + Implements(a.t, interfaceObject, object, msgAndArgs...) +} + + +// InDelta asserts that the two numerals are within delta of each other. +// +// a.InDelta(math.Pi, (22 / 7.0), 0.01) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InDelta(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDelta(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InDeltaSlice is the same as InDelta, except it compares two slices. 
+func (a *Assertions) InDeltaSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InDeltaSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// InEpsilon asserts that expected and actual have a relative error less than epsilon +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) InEpsilon(expected interface{}, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { + InEpsilon(a.t, expected, actual, epsilon, msgAndArgs...) +} + + +// InEpsilonSlice is the same as InEpsilon, except it compares two slices. +func (a *Assertions) InEpsilonSlice(expected interface{}, actual interface{}, delta float64, msgAndArgs ...interface{}) { + InEpsilonSlice(a.t, expected, actual, delta, msgAndArgs...) +} + + +// IsType asserts that the specified objects are of the same type. +func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { + IsType(a.t, expectedType, object, msgAndArgs...) +} + + +// JSONEq asserts that two JSON strings are equivalent. +// +// a.JSONEq(`{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) JSONEq(expected string, actual string, msgAndArgs ...interface{}) { + JSONEq(a.t, expected, actual, msgAndArgs...) +} + + +// Len asserts that the specified object has specific length. +// Len also fails if the object has a type that len() not accept. +// +// a.Len(mySlice, 3, "The size of slice is not 3") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Len(object interface{}, length int, msgAndArgs ...interface{}) { + Len(a.t, object, length, msgAndArgs...) +} + + +// Nil asserts that the specified object is nil. +// +// a.Nil(err, "err should be nothing") +// +// Returns whether the assertion was successful (true) or not (false). 
+func (a *Assertions) Nil(object interface{}, msgAndArgs ...interface{}) { + Nil(a.t, object, msgAndArgs...) +} + + +// NoError asserts that a function returned no error (i.e. `nil`). +// +// actualObj, err := SomeFunction() +// if a.NoError(err) { +// assert.Equal(t, actualObj, expectedObj) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NoError(err error, msgAndArgs ...interface{}) { + NoError(a.t, err, msgAndArgs...) +} + + +// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the +// specified substring or element. +// +// a.NotContains("Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") +// a.NotContains(["Hello", "World"], "Earth", "But ['Hello', 'World'] does NOT contain 'Earth'") +// a.NotContains({"Hello": "World"}, "Earth", "But {'Hello': 'World'} does NOT contain 'Earth'") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotContains(s interface{}, contains interface{}, msgAndArgs ...interface{}) { + NotContains(a.t, s, contains, msgAndArgs...) +} + + +// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either +// a slice or a channel with len == 0. +// +// if a.NotEmpty(obj) { +// assert.Equal(t, "two", obj[1]) +// } +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) { + NotEmpty(a.t, object, msgAndArgs...) +} + + +// NotEqual asserts that the specified values are NOT equal. +// +// a.NotEqual(obj1, obj2, "two objects shouldn't be equal") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotEqual(expected interface{}, actual interface{}, msgAndArgs ...interface{}) { + NotEqual(a.t, expected, actual, msgAndArgs...) +} + + +// NotNil asserts that the specified object is not nil. 
+// +// a.NotNil(err, "err should be something") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotNil(object interface{}, msgAndArgs ...interface{}) { + NotNil(a.t, object, msgAndArgs...) +} + + +// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. +// +// a.NotPanics(func(){ +// RemainCalm() +// }, "Calling RemainCalm() should NOT panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotPanics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + NotPanics(a.t, f, msgAndArgs...) +} + + +// NotRegexp asserts that a specified regexp does not match a string. +// +// a.NotRegexp(regexp.MustCompile("starts"), "it's starting") +// a.NotRegexp("^start", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) NotRegexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + NotRegexp(a.t, rx, str, msgAndArgs...) +} + + +// NotZero asserts that i is not the zero value for its type and returns the truth. +func (a *Assertions) NotZero(i interface{}, msgAndArgs ...interface{}) { + NotZero(a.t, i, msgAndArgs...) +} + + +// Panics asserts that the code inside the specified PanicTestFunc panics. +// +// a.Panics(func(){ +// GoCrazy() +// }, "Calling GoCrazy() should panic") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Panics(f assert.PanicTestFunc, msgAndArgs ...interface{}) { + Panics(a.t, f, msgAndArgs...) +} + + +// Regexp asserts that a specified regexp matches a string. +// +// a.Regexp(regexp.MustCompile("start"), "it's starting") +// a.Regexp("start...$", "it's not starting") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) Regexp(rx interface{}, str interface{}, msgAndArgs ...interface{}) { + Regexp(a.t, rx, str, msgAndArgs...) 
+} + + +// True asserts that the specified value is true. +// +// a.True(myBool, "myBool should be true") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) True(value bool, msgAndArgs ...interface{}) { + True(a.t, value, msgAndArgs...) +} + + +// WithinDuration asserts that the two times are within duration delta of each other. +// +// a.WithinDuration(time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") +// +// Returns whether the assertion was successful (true) or not (false). +func (a *Assertions) WithinDuration(expected time.Time, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { + WithinDuration(a.t, expected, actual, delta, msgAndArgs...) +} + + +// Zero asserts that i is the zero value for its type and returns the truth. +func (a *Assertions) Zero(i interface{}, msgAndArgs ...interface{}) { + Zero(a.t, i, msgAndArgs...) +} diff --git a/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl new file mode 100644 index 000000000..b93569e0a --- /dev/null +++ b/vendor/github.com/stretchr/testify/require/require_forward.go.tmpl @@ -0,0 +1,4 @@ +{{.CommentWithoutT "a"}} +func (a *Assertions) {{.DocInfo.Name}}({{.Params}}) { + {{.DocInfo.Name}}(a.t, {{.ForwardedParams}}) +} diff --git a/vendor/github.com/stretchr/testify/require/requirements.go b/vendor/github.com/stretchr/testify/require/requirements.go index d46cfe5a2..41147562d 100644 --- a/vendor/github.com/stretchr/testify/require/requirements.go +++ b/vendor/github.com/stretchr/testify/require/requirements.go @@ -1,308 +1,9 @@ package require -import ( - "encoding/json" - "time" - - "github.com/stretchr/testify/assert" -) - +// TestingT is an interface wrapper around *testing.T type TestingT interface { Errorf(format string, args ...interface{}) FailNow() } -// Fail 
reports a failure through -func FailNow(t TestingT, failureMessage string, msgAndArgs ...interface{}) { - assert.Fail(t, failureMessage, msgAndArgs...) - t.FailNow() -} - -// Implements asserts that an object is implemented by the specified interface. -// -// require.Implements(t, (*MyInterface)(nil), new(MyObject), "MyObject") -func Implements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) { - if !assert.Implements(t, interfaceObject, object, msgAndArgs...) { - t.FailNow() - } -} - -// IsType asserts that the specified objects are of the same type. -func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { - if !assert.IsType(t, expectedType, object, msgAndArgs...) { - t.FailNow() - } -} - -// Equal asserts that two objects are equal. -// -// require.Equal(t, 123, 123, "123 and 123 should be equal") -func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) { - if !assert.Equal(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - -// EqualValues asserts that two objects are equal or convertable to each other. -// -// require.EqualValues(t, uint32(123), int32(123), "123 and 123 should be equal") -func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) { - if !assert.EqualValues(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - -// Exactly asserts that two objects are equal is value and type. -// -// require.Exactly(t, int32(123), int64(123), "123 and 123 should NOT be equal") -func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) { - if !assert.Exactly(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - -// NotNil asserts that the specified object is not nil. -// -// require.NotNil(t, err, "err should be something") -func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.NotNil(t, object, msgAndArgs...) 
{ - t.FailNow() - } -} - -// Nil asserts that the specified object is nil. -// -// require.Nil(t, err, "err should be nothing") -func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.Nil(t, object, msgAndArgs...) { - t.FailNow() - } -} - -// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// require.Empty(t, obj) -func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.Empty(t, object, msgAndArgs...) { - t.FailNow() - } -} - -// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either -// a slice or a channel with len == 0. -// -// require.NotEmpty(t, obj) -// require.Equal(t, "one", obj[0]) -func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) { - if !assert.NotEmpty(t, object, msgAndArgs...) { - t.FailNow() - } -} - -// Len asserts that the specified object has specific length. -// Len also fails if the object has a type that len() not accept. -// -// require.Len(t, mySlice, 3, "The size of slice is not 3") -func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{}) { - if !assert.Len(t, object, length, msgAndArgs...) { - t.FailNow() - } -} - -// True asserts that the specified value is true. -// -// require.True(t, myBool, "myBool should be true") -func True(t TestingT, value bool, msgAndArgs ...interface{}) { - if !assert.True(t, value, msgAndArgs...) { - t.FailNow() - } -} - -// False asserts that the specified value is false. -// -// require.False(t, myBool, "myBool should be false") -func False(t TestingT, value bool, msgAndArgs ...interface{}) { - if !assert.False(t, value, msgAndArgs...) { - t.FailNow() - } -} - -// NotEqual asserts that the specified values are NOT equal. 
-// -// require.NotEqual(t, obj1, obj2, "two objects shouldn't be equal") -func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) { - if !assert.NotEqual(t, expected, actual, msgAndArgs...) { - t.FailNow() - } -} - -// Contains asserts that the specified string, list(array, slice...) or map contains the -// specified substring or element. -// -// require.Contains(t, "Hello World", "World", "But 'Hello World' does contain 'World'") -// require.Contains(t, ["Hello", "World"], "World", "But ["Hello", "World"] does contain 'World'") -// require.Contains(t, {"Hello": "World"}, "Hello", "But {'Hello': 'World'} does contain 'Hello'") -func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) { - if !assert.Contains(t, s, contains, msgAndArgs...) { - t.FailNow() - } -} - -// NotContains asserts that the specified string does NOT contain the specified substring. -// -// require.NotContains(t, "Hello World", "Earth", "But 'Hello World' does NOT contain 'Earth'") -func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) { - if !assert.NotContains(t, s, contains, msgAndArgs...) { - t.FailNow() - } -} - -// Condition uses a Comparison to assert a complex condition. -func Condition(t TestingT, comp assert.Comparison, msgAndArgs ...interface{}) { - if !assert.Condition(t, comp, msgAndArgs...) { - t.FailNow() - } -} - -// Panics asserts that the code inside the specified PanicTestFunc panics. -// -// require.Panics(t, func(){ -// GoCrazy() -// }, "Calling GoCrazy() should panic") -func Panics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if !assert.Panics(t, f, msgAndArgs...) { - t.FailNow() - } -} - -// NotPanics asserts that the code inside the specified PanicTestFunc does NOT panic. 
-// -// require.NotPanics(t, func(){ -// RemainCalm() -// }, "Calling RemainCalm() should NOT panic") -func NotPanics(t TestingT, f assert.PanicTestFunc, msgAndArgs ...interface{}) { - if !assert.NotPanics(t, f, msgAndArgs...) { - t.FailNow() - } -} - -// WithinDuration asserts that the two times are within duration delta of each other. -// -// require.WithinDuration(t, time.Now(), time.Now(), 10*time.Second, "The difference should not be more than 10s") -func WithinDuration(t TestingT, expected, actual time.Time, delta time.Duration, msgAndArgs ...interface{}) { - if !assert.WithinDuration(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() - } -} - -// InDelta asserts that the two numerals are within delta of each other. -// -// require.InDelta(t, math.Pi, (22 / 7.0), 0.01) -func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs ...interface{}) { - if !assert.InDelta(t, expected, actual, delta, msgAndArgs...) { - t.FailNow() - } -} - -// InEpsilon asserts that expected and actual have a relative error less than epsilon -func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAndArgs ...interface{}) { - if !assert.InEpsilon(t, expected, actual, epsilon, msgAndArgs...) { - t.FailNow() - } -} - -// Regexp asserts that a specified regexp matches a string. -// -// require.Regexp(t, regexp.MustCompile("start"), "it's starting") -// require.Regexp(t, "start...$", "it's not starting") -func Regexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if !assert.Regexp(t, rx, str, msgAndArgs...) { - t.FailNow() - } -} - -// NotRegexp asserts that a specified regexp does not match a string. -// -// require.NotRegexp(t, regexp.MustCompile("starts"), "it's starting") -// require.NotRegexp(t, "^start", "it's not starting") -func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interface{}) { - if !assert.NotRegexp(t, rx, str, msgAndArgs...) 
{ - t.FailNow() - } -} - -// JSONEq asserts that two JSON strings are equivalent. -// -// assert.JSONEq(t, `{"hello": "world", "foo": "bar"}`, `{"foo": "bar", "hello": "world"}`) -// -// Returns whether the assertion was successful (true) or not (false). -func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{}) { - var expectedJSONAsInterface, actualJSONAsInterface interface{} - - if err := json.Unmarshal([]byte(expected), &expectedJSONAsInterface); err != nil { - t.FailNow() - } - - if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { - t.FailNow() - } - - Equal(t, expectedJSONAsInterface, actualJSONAsInterface, msgAndArgs...) -} - -/* - Errors -*/ - -// NoError asserts that a function returned no error (i.e. `nil`). -// -// actualObj, err := SomeFunction() -// require.NoError(t, err) -// require.Equal(t, actualObj, expectedObj) -// -// Returns whether the assertion was successful (true) or not (false). -func NoError(t TestingT, err error, msgAndArgs ...interface{}) { - if !assert.NoError(t, err, msgAndArgs...) { - t.FailNow() - } -} - -// Error asserts that a function returned an error (i.e. not `nil`). -// -// actualObj, err := SomeFunction() -// require.Error(t, err, "An error was expected") -// require.Equal(t, err, expectedError) -// } -func Error(t TestingT, err error, msgAndArgs ...interface{}) { - if !assert.Error(t, err, msgAndArgs...) { - t.FailNow() - } -} - -// EqualError asserts that a function returned an error (i.e. not `nil`) -// and that it is equal to the provided error. -// -// actualObj, err := SomeFunction() -// require.Error(t, err, "An error was expected") -// require.Equal(t, err, expectedError) -// } -func EqualError(t TestingT, theError error, errString string, msgAndArgs ...interface{}) { - if !assert.EqualError(t, theError, errString, msgAndArgs...) { - t.FailNow() - } -} - -// Zero asserts that i is the zero value for its type and returns the truth. 
-func Zero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if !assert.Zero(t, i, msgAndArgs...) { - t.FailNow() - } -} - -// NotZero asserts that i is not the zero value for its type and returns the truth. -func NotZero(t TestingT, i interface{}, msgAndArgs ...interface{}) { - if !assert.NotZero(t, i, msgAndArgs...) { - t.FailNow() - } -} +//go:generate go run ../_codegen/main.go -output-package=require -template=require.go.tmpl diff --git a/vendor/go.pedge.io/dlog/LICENSE b/vendor/go.pedge.io/dlog/LICENSE new file mode 100644 index 000000000..89ddc0632 --- /dev/null +++ b/vendor/go.pedge.io/dlog/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Peter Edge + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + diff --git a/vendor/go.pedge.io/dlog/Makefile b/vendor/go.pedge.io/dlog/Makefile new file mode 100644 index 000000000..4136c9586 --- /dev/null +++ b/vendor/go.pedge.io/dlog/Makefile @@ -0,0 +1,54 @@ +all: test + +deps: + go get -d -v ./... 
+ +updatedeps: + go get -d -v -u -f ./... + +testdeps: + go get -d -v -t ./... + +updatetestdeps: + go get -d -v -t -u -f ./... + +build: deps + go build ./... + +lint: testdeps + go get -v github.com/golang/lint/golint + for file in $$(find . -name '*.go'); do \ + golint $${file} | grep -v underscore; \ + if [ -n "$$(golint $${file} | grep -v underscore)" ]; then \ + exit 1; \ + fi; \ + done + +vet: testdeps + go vet ./... + +errcheck: testdeps + go get -v github.com/kisielk/errcheck + errcheck ./... + +pretest: lint vet errcheck + +test: testdeps pretest + go test -v ./... + +clean: + go clean ./... + +.PHONY: \ + all \ + deps \ + updatedeps \ + testdeps \ + updatetestdeps \ + build \ + lint \ + vet \ + errcheck \ + pretest \ + test \ + clean diff --git a/vendor/go.pedge.io/dlog/README.md b/vendor/go.pedge.io/dlog/README.md new file mode 100644 index 000000000..d8f4d0e48 --- /dev/null +++ b/vendor/go.pedge.io/dlog/README.md @@ -0,0 +1,60 @@ +[![CircleCI](https://circleci.com/gh/peter-edge/dlog-go/tree/master.png)](https://circleci.com/gh/peter-edge/dlog-go/tree/master) +[![Go Report Card](http://goreportcard.com/badge/peter-edge/dlog-go)](http://goreportcard.com/report/peter-edge/dlog-go) +[![GoDoc](http://img.shields.io/badge/GoDoc-Reference-blue.svg)](https://godoc.org/go.pedge.io/dlog) +[![MIT License](http://img.shields.io/badge/License-MIT-blue.svg)](https://github.com/peter-edge/dlog-go/blob/master/LICENSE) + +dlog (delegating log) wraps common functionality for common golang logging packages. + +The `dlog.Logger` interface wraps the common logging functionality. Every method on `dlog.Logger` +is also a global method on the `dlog` package. 
Given an implementation of `dlog.Logger`, you can +register it as the global logger by calling: + +```go +func register(logger dlog.Logger) { + dlog.SetLogger(logger) +} +``` + +To make things simple, packages for glog, logrus, protolog, and lion are given with the ability to easily register +their implementations as the default logger: + +```go +import ( + "go.pedge.io/dlog/glog" // set glog as the global logger + "go.pedge.io/dlog/logrus" // set logrus as the global logger with default settings + "go.pedge.io/dlog/protolog" // set protolog as the global logger with default settings + "go.pedge.io/dlog/lion" // set lion as the global logger with default settings +) + +func registrationFunctions() { + dlog_glog.Register() // set glog as the global logger + dlog_logrus.Register() // set logrus as the global logger with default settings + dlog_protolog.Register() // set protolog as the global logger with default settings + dlog_lion.Register() // set lion as the global logger with default settings +} +``` + +Or, do something more custom: + +```go +import ( + "os" + + "go.pedge.io/dlog" + dloglogrus "go.pedge.io/dlog/logrus" + + "github.com/Sirupsen/logrus" +) + +func init() { // or anywhere + logger := logrus.New() + logger.Out = os.Stdout + logger.Formatter = &logrus.TextFormatter{ + ForceColors: true, + } + dlog.SetLogger(dloglogrus.NewLogger(logger)) +} +``` + +By default, golang's standard logger is used. This is not recommended, however, as the implementation +with the WithFields function is slow. It would be better to choose a different implementation in most cases. 
diff --git a/vendor/go.pedge.io/dlog/circle.yml b/vendor/go.pedge.io/dlog/circle.yml new file mode 100644 index 000000000..60d295ccc --- /dev/null +++ b/vendor/go.pedge.io/dlog/circle.yml @@ -0,0 +1,8 @@ +dependencies: + override: + - mkdir -p "$(echo $GOPATH | cut -f 1 -d :)/src/go.pedge.io/dlog" + - rsync -azC --delete ./ "$(echo $GOPATH | cut -f 1 -d :)/src/go.pedge.io/dlog/" + - make -C "$(echo $GOPATH | cut -f 1 -d :)/src/go.pedge.io/dlog" testdeps +test: + override: + - make -C "$(echo $GOPATH | cut -f 1 -d :)/src/go.pedge.io/dlog" test diff --git a/vendor/go.pedge.io/dlog/dlog.go b/vendor/go.pedge.io/dlog/dlog.go new file mode 100644 index 000000000..d61381b99 --- /dev/null +++ b/vendor/go.pedge.io/dlog/dlog.go @@ -0,0 +1,366 @@ +/* +Package dlog (delegating log) wraps common functionality for common golang logging packages. + +The Logger interface wraps the common logging functionality. Every method on Logger +is also a global method on the dlog package. Given an implementation of Logger, you can +register it as the global logger by calling: + + func register(logger dlog.Logger) { + dlog.SetLogger(logger) + } + +To make things simple, packages for glog, logrus, protolog, and lion are given with the ability to easily register +their implementations as the default logger: + + import ( + "go.pedge.io/dlog/glog" // set glog as the global logger + "go.pedge.io/dlog/logrus" // set logrus as the global logger with default settings + "go.pedge.io/dlog/protolog" // set protolog as the global logger with default settings + "go.pedge.io/dlog/lion" // set lion as the global logger with default settings + ) + + func registrationFunctions() { + dlog_glog.Register() // set glog as the global logger + dlog_logrus.Register() // set logrus as the global logger with default settings + dlog_protolog.Register() // set protolog as the global logger with default settings + dlog_lion.Register() // set lion as the global logger with default settings + } + +Or, do something more 
custom: + + import ( + "os" + + "go.pedge.io/dlog" + dloglogrus "go.pedge.io/dlog/logrus" + + "github.com/Sirupsen/logrus" + ) + + func init() { // or anywhere + logger := logrus.New() + logger.Out = os.Stdout + logger.Formatter = &logrus.TextFormatter{ + ForceColors: true, + } + dlog.SetLogger(dloglogrus.NewLogger(logger)) + } + +By default, golang's standard logger is used. This is not recommended, however, as the implementation +with the WithFields function is slow. It would be better to choose a different implementation in most cases. +*/ +package dlog // import "go.pedge.io/dlog" + +import ( + "fmt" + "log" + "os" + "strings" + "sync" + "unicode" +) + +var ( + // DefaultLogger is the default Logger. + DefaultLogger = NewStdLogger(log.New(os.Stderr, "", log.LstdFlags)) + // DefaultLevel is the default Level. + DefaultLevel = LevelInfo + + globalLogger = DefaultLogger + globalLevel = DefaultLevel + globalLevelSet = false + globalLock = &sync.Mutex{} +) + +// BaseLogger is the Logger's log functionality, split from WithField/WithFields for easier wrapping of other libraries. +type BaseLogger interface { + Debugf(format string, args ...interface{}) + Debugln(args ...interface{}) + Infof(format string, args ...interface{}) + Infoln(args ...interface{}) + Warnf(format string, args ...interface{}) + Warnln(args ...interface{}) + Errorf(format string, args ...interface{}) + Errorln(args ...interface{}) + Fatalf(format string, args ...interface{}) + Fatalln(args ...interface{}) + Panicf(format string, args ...interface{}) + Panicln(args ...interface{}) + Printf(format string, args ...interface{}) + Println(args ...interface{}) +} + +// Logger is an interface that all logging implementations must implement. +type Logger interface { + BaseLogger + AtLevel(level Level) Logger + WithField(key string, value interface{}) Logger + WithFields(fields map[string]interface{}) Logger +} + +// Register re-registers the default Logger as the dlog global Logger. 
+func Register() { + SetLogger(DefaultLogger) +} + +// SetLogger sets the global logger used by dlog. +func SetLogger(logger Logger) { + globalLock.Lock() + defer globalLock.Unlock() + if globalLevelSet { + logger = logger.AtLevel(globalLevel) + } + globalLogger = logger +} + +// SetLevel sets the global Level. +func SetLevel(level Level) { + globalLock.Lock() + defer globalLock.Unlock() + if globalLevel != level { + globalLogger = globalLogger.AtLevel(level) + } + globalLevel = level + globalLevelSet = true +} + +// NewLogger creates a new Logger using a print function, and optionally +// specific Level to print functions (levelToPrintFunc can be nil). +// +// printFunc is used if a Level is not represented. +// LevelNone overrides printFunc. +// +// printFunc is required. +func NewLogger(printFunc func(...interface{}), levelToPrintFunc map[Level]func(...interface{})) Logger { + return newLogger(globalLevel, printFunc, levelToPrintFunc) +} + +// NewStdLogger creates a new Logger using a standard golang Logger. +func NewStdLogger(l *log.Logger) Logger { + return newLogger(globalLevel, l.Println, nil) +} + +// WithField calls WithField on the global Logger. +func WithField(key string, value interface{}) Logger { + return globalLogger.WithField(key, value) +} + +// WithFields calls WithFields on the global Logger. +func WithFields(fields map[string]interface{}) Logger { + return globalLogger.WithFields(fields) +} + +// Debugf logs at the debug level with the semantics of fmt.Printf. +func Debugf(format string, args ...interface{}) { + globalLogger.Debugf(format, args...) +} + +// Debugln logs at the debug level with the semantics of fmt.Println. +func Debugln(args ...interface{}) { + globalLogger.Debugln(args...) +} + +// Infof logs at the info level with the semantics of fmt.Printf. +func Infof(format string, args ...interface{}) { + globalLogger.Infof(format, args...) +} + +// Infoln logs at the info level with the semantics of fmt.Println. 
+func Infoln(args ...interface{}) { + globalLogger.Infoln(args...) +} + +// Warnf logs at the warn level with the semantics of fmt.Printf. +func Warnf(format string, args ...interface{}) { + globalLogger.Warnf(format, args...) +} + +// Warnln logs at the warn level with the semantics of fmt.Println. +func Warnln(args ...interface{}) { + globalLogger.Warnln(args...) +} + +// Errorf logs at the error level with the semantics of fmt.Printf. +func Errorf(format string, args ...interface{}) { + globalLogger.Errorf(format, args...) +} + +// Errorln logs at the error level with the semantics of fmt.Println. +func Errorln(args ...interface{}) { + globalLogger.Errorln(args...) +} + +// Fatalf logs at the fatal level with the semantics of fmt.Printf and exits with os.Exit(1). +func Fatalf(format string, args ...interface{}) { + globalLogger.Fatalf(format, args...) +} + +// Fatalln logs at the fatal level with the semantics of fmt.Println and exits with os.Exit(1). +func Fatalln(args ...interface{}) { + globalLogger.Fatalln(args...) +} + +// Panicf logs at the panic level with the semantics of fmt.Printf and panics. +func Panicf(format string, args ...interface{}) { + globalLogger.Panicf(format, args...) +} + +// Panicln logs at the panic level with the semantics of fmt.Println and panics. +func Panicln(args ...interface{}) { + globalLogger.Panicln(args...) +} + +// Printf logs at the info level with the semantics of fmt.Printf. +func Printf(format string, args ...interface{}) { + globalLogger.Printf(format, args...) +} + +// Println logs at the info level with the semantics of fmt.Println. +func Println(args ...interface{}) { + globalLogger.Println(args...) 
+} + +type logger struct { + level Level + levelToPrintFunc map[Level]func(...interface{}) + fields map[string]interface{} +} + +func newLogger(initialLevel Level, printFunc func(...interface{}), levelToPrintFunc map[Level]func(...interface{})) *logger { + if printFunc == nil { + // really not a fan of this, but since this is generally called at initialization, just makes things + // easier for now + panic("dlog: printFunc is nil") + } + return &logger{initialLevel, getLevelToPrintFunc(printFunc, levelToPrintFunc), make(map[string]interface{}, 0)} +} + +func (l *logger) AtLevel(level Level) Logger { + return &logger{level, l.levelToPrintFunc, l.fields} +} + +func (l *logger) WithField(key string, value interface{}) Logger { + return l.WithFields(map[string]interface{}{key: value}) +} + +func (l *logger) WithFields(fields map[string]interface{}) Logger { + newFields := make(map[string]interface{}, len(l.fields)+len(fields)) + for key, value := range l.fields { + newFields[key] = value + } + for key, value := range fields { + newFields[key] = value + } + return &logger{l.level, l.levelToPrintFunc, newFields} +} + +func (l *logger) Debugf(format string, args ...interface{}) { + l.print(LevelDebug, fmt.Sprintf(format, args...)) +} + +func (l *logger) Debugln(args ...interface{}) { + l.print(LevelDebug, fmt.Sprint(args...)) +} + +func (l *logger) Infof(format string, args ...interface{}) { + l.print(LevelInfo, fmt.Sprintf(format, args...)) +} + +func (l *logger) Infoln(args ...interface{}) { + l.print(LevelInfo, fmt.Sprint(args...)) +} + +func (l *logger) Warnf(format string, args ...interface{}) { + l.print(LevelWarn, fmt.Sprintf(format, args...)) +} + +func (l *logger) Warnln(args ...interface{}) { + l.print(LevelWarn, fmt.Sprint(args...)) +} + +func (l *logger) Errorf(format string, args ...interface{}) { + l.print(LevelError, fmt.Sprintf(format, args...)) +} + +func (l *logger) Errorln(args ...interface{}) { + l.print(LevelError, fmt.Sprint(args...)) +} + +func (l 
*logger) Fatalf(format string, args ...interface{}) { + l.print(LevelFatal, fmt.Sprintf(format, args...)) + os.Exit(1) +} + +func (l *logger) Fatalln(args ...interface{}) { + l.print(LevelFatal, fmt.Sprint(args...)) + os.Exit(1) +} + +func (l *logger) Panicf(format string, args ...interface{}) { + l.print(LevelPanic, fmt.Sprintf(format, args...)) + panic(fmt.Sprintf(format, args...)) +} + +func (l *logger) Panicln(args ...interface{}) { + l.print(LevelPanic, fmt.Sprint(args...)) + panic(fmt.Sprint(args...)) +} + +func (l *logger) Printf(format string, args ...interface{}) { + l.print(LevelNone, fmt.Sprintf(format, args...)) +} + +func (l *logger) Println(args ...interface{}) { + l.print(LevelNone, fmt.Sprint(args...)) +} + +func (l *logger) print(level Level, value string) { + if level < l.level && l.level != LevelNone { + return + } + // expected to be ok since we covered this internally + printFunc, ok := l.levelToPrintFunc[level] + if !ok { + printFunc, ok = l.levelToPrintFunc[LevelNone] + if !ok { + panic("dlog: cannot find any printFunc") + } + } + fieldsString := l.getFieldsString() + if fieldsString == "" { + printFunc(value) + } else { + printFunc(fmt.Sprintf("%s %s", strings.TrimRightFunc(value, unicode.IsSpace), fieldsString)) + } +} + +func (l *logger) getFieldsString() string { + if len(l.fields) == 0 { + return "" + } + values := make([]string, len(l.fields)) + i := 0 + for key, value := range l.fields { + values[i] = fmt.Sprintf("%s=%v", key, value) + i++ + } + return strings.Join(values, " ") +} + +func getLevelToPrintFunc(printFunc func(...interface{}), inputLevelToPrintFunc map[Level]func(...interface{})) map[Level]func(...interface{}) { + levelToPrintFunc := make(map[Level]func(...interface{})) + if inputLevelToPrintFunc != nil { + for level, inputPrintFunc := range inputLevelToPrintFunc { + levelToPrintFunc[level] = inputPrintFunc + } + } + if _, ok := levelToPrintFunc[LevelNone]; !ok { + levelToPrintFunc[LevelNone] = printFunc + } + for level := 
range levelToName { + if _, ok := levelToPrintFunc[level]; !ok { + levelToPrintFunc[level] = printFunc + } + } + return levelToPrintFunc +} diff --git a/vendor/go.pedge.io/dlog/dlog_level.go b/vendor/go.pedge.io/dlog/dlog_level.go new file mode 100644 index 000000000..e3e177fea --- /dev/null +++ b/vendor/go.pedge.io/dlog/dlog_level.go @@ -0,0 +1,65 @@ +package dlog + +import ( + "fmt" + "strconv" +) + +const ( + // LevelNone represents no Level. + LevelNone Level = 0 + // LevelDebug is the debug Level. + LevelDebug Level = 1 + // LevelInfo is the info Level. + LevelInfo Level = 2 + // LevelWarn is the warn Level. + LevelWarn Level = 3 + // LevelError is the error Level. + LevelError Level = 4 + // LevelFatal is the fatal Level. + LevelFatal Level = 5 + // LevelPanic is the panic Level. + LevelPanic Level = 6 +) + +var ( + levelToName = map[Level]string{ + LevelNone: "NONE", + LevelDebug: "DEBUG", + LevelInfo: "INFO", + LevelWarn: "WARN", + LevelError: "ERROR", + LevelFatal: "FATAL", + LevelPanic: "PANIC", + } + nameToLevel = map[string]Level{ + "NONE": LevelNone, + "DEBUG": LevelDebug, + "INFO": LevelInfo, + "WARN": LevelWarn, + "ERROR": LevelError, + "FATAL": LevelFatal, + "PANIC": LevelPanic, + } +) + +// Level is a logging level. +type Level int32 + +// String returns the name of a Level or the numerical value if the Level is unknown. +func (l Level) String() string { + name, ok := levelToName[l] + if !ok { + return strconv.Itoa(int(l)) + } + return name +} + +// NameToLevel returns the Level for the given name. 
+func NameToLevel(name string) (Level, error) { + level, ok := nameToLevel[name] + if !ok { + return LevelNone, fmt.Errorf("lion: no level for name: %s", name) + } + return level, nil +} diff --git a/vendor/go.pedge.io/dlog/logrus/logrus.go b/vendor/go.pedge.io/dlog/logrus/logrus.go new file mode 100644 index 000000000..b5e1e81c5 --- /dev/null +++ b/vendor/go.pedge.io/dlog/logrus/logrus.go @@ -0,0 +1,74 @@ +package dlog_logrus // import "go.pedge.io/dlog/logrus" + +import ( + "go.pedge.io/dlog" + + "github.com/Sirupsen/logrus" +) + +var ( + levelToLogrusLevel = map[dlog.Level]logrus.Level{ + dlog.LevelNone: logrus.InfoLevel, + dlog.LevelDebug: logrus.DebugLevel, + dlog.LevelInfo: logrus.InfoLevel, + dlog.LevelWarn: logrus.WarnLevel, + dlog.LevelError: logrus.ErrorLevel, + dlog.LevelFatal: logrus.FatalLevel, + dlog.LevelPanic: logrus.PanicLevel, + } +) + +// Register registers the default logrus Logger as the dlog Logger. +func Register() { + dlog.SetLogger(NewLogger(logrus.StandardLogger())) +} + +// NewLogger returns a new dlog.Logger that uses the logrus.Logger. 
+func NewLogger(logrusLogger *logrus.Logger) dlog.Logger { + return newLogger(&loggerLogrusLogger{logrusLogger}) +} + +type logrusLogger interface { + dlog.BaseLogger + WithField(key string, value interface{}) *logrus.Entry + WithFields(fields logrus.Fields) *logrus.Entry + SetLevel(level dlog.Level) +} + +type loggerLogrusLogger struct { + *logrus.Logger +} + +func (l *loggerLogrusLogger) SetLevel(level dlog.Level) { + l.Logger.Level = levelToLogrusLevel[level] +} + +type entryLogrusLogger struct { + *logrus.Entry +} + +func (l *entryLogrusLogger) SetLevel(level dlog.Level) { + l.Entry.Level = levelToLogrusLevel[level] +} + +type logger struct { + dlog.BaseLogger + l logrusLogger +} + +func newLogger(l logrusLogger) *logger { + return &logger{l, l} +} + +func (l *logger) AtLevel(level dlog.Level) dlog.Logger { + l.l.SetLevel(level) + return l +} + +func (l *logger) WithField(key string, value interface{}) dlog.Logger { + return newLogger(&entryLogrusLogger{l.l.WithField(key, value)}) +} + +func (l *logger) WithFields(fields map[string]interface{}) dlog.Logger { + return newLogger(&entryLogrusLogger{l.l.WithFields(fields)}) +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/any.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/any.pb.go new file mode 100644 index 000000000..071f1e70b --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/any.pb.go @@ -0,0 +1,99 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/any.proto +// DO NOT EDIT! + +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+const _ = proto.ProtoPackageIsVersion1 + +// `Any` contains an arbitrary serialized message along with a URL +// that describes the type of the serialized message. +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name whose content describes the type of the + // serialized message. + // + // For URLs which use the schema `http`, `https`, or no schema, the + // following restrictions and interpretations apply: + // + // * If no schema is provided, `https` is assumed. + // * The last segment of the URL's path must represent the fully + // qualified name of the type (as in `path/google.protobuf.Duration`). + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Schemas other than `http`, `https` (or the empty schema) might be + // used with implementation specific semantics. 
+ // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url" json:"type_url,omitempty"` + // Must be valid serialized data of the above specified type. + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (m *Any) String() string { return proto.CompactTextString(m) } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} + +var fileDescriptor0 = []byte{ + // 150 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x35, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x01, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0x4e, 0x21, 0x5e, 0x2e, 0xd6, 0xb2, 0xc4, 0x9c, 0xd2, 0x54, 0x09, 0x26, 0x20, 0x97, 0xc7, + 0xc9, 0x9b, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0x4d, 0xbb, 0x13, 0x07, 0x50, 0x73, 0x00, 0x88, + 0x13, 0xc0, 0xb8, 0x80, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, + 0x88, 0xb2, 0x00, 0xa8, 0x32, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, + 0xa0, 0x25, 0xc5, 0x49, 0x6c, 0x60, 0xfd, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xa0, 0x4c, + 0x2d, 0x0d, 0xa9, 0x00, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/api.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/api.pb.go new file mode 100644 index 000000000..9b96685fb --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/api.pb.go @@ -0,0 +1,241 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/api.proto +// DO NOT EDIT! 
+ +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Api is a light-weight descriptor for a protocol buffer service. +type Api struct { + // The fully qualified name of this api, including package name + // followed by the api's simple name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The methods of this api, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods" json:"methods,omitempty"` + // Any metadata attached to the API. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + // A version string for this api. If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version + // is omitted, it defaults to zero. If the entire version field is + // empty, the major version is derived from the package name, as + // outlined below. If the field is not empty, the version in the + // package name will be verified to be consistent with what is + // provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // API, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, none-GA apis. 
+ // + // + Version string `protobuf:"bytes,4,opt,name=version" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context" json:"source_context,omitempty"` + // Included APIs. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins" json:"mixins,omitempty"` + // The source syntax of the service. + Syntax Syntax `protobuf:"varint,7,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Api) Reset() { *m = Api{} } +func (m *Api) String() string { return proto.CompactTextString(m) } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +// Method represents a method of an api. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming" json:"response_streaming,omitempty"` + // Any metadata attached to the method. 
+ Options []*Option `protobuf:"bytes,6,rep,name=options" json:"options,omitempty"` + // The source syntax of this method. + Syntax Syntax `protobuf:"varint,7,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Method) Reset() { *m = Method{} } +func (m *Method) String() string { return proto.CompactTextString(m) } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +// Declares an API to be included in this API. The including API must +// redeclare all the methods from the included API, but documentation +// and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including API plus the [root][] path if specified. +// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. 
+// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the API which is included. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. 
+ Root string `protobuf:"bytes,2,opt,name=root" json:"root,omitempty"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (m *Mixin) String() string { return proto.CompactTextString(m) } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} + +var fileDescriptor1 = []byte{ + // 363 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0xcf, 0x4e, 0xf2, 0x40, + 0x14, 0xc5, 0xd3, 0x16, 0x0a, 0xdf, 0xe5, 0x0b, 0x68, 0x4d, 0x74, 0xe8, 0x82, 0x10, 0x34, 0xda, + 0x55, 0x49, 0x30, 0x71, 0x2f, 0x2e, 0x58, 0x10, 0x63, 0x13, 0x4c, 0x5c, 0x92, 0x82, 0x23, 0x36, + 0x69, 0x67, 0xea, 0xcc, 0x54, 0x61, 0xe9, 0x9b, 0x18, 0x97, 0xbe, 0x8f, 0xef, 0xe3, 0x74, 0x06, + 0x44, 0x8a, 0x1a, 0x77, 0xbd, 0xfd, 0x9d, 0xfb, 0xe7, 0x9c, 0x0c, 0x34, 0x67, 0x94, 0xce, 0x62, + 0xdc, 0x4d, 0x19, 0x15, 0x74, 0x92, 0xdd, 0x75, 0xc3, 0x34, 0xf2, 0x55, 0xe1, 0x34, 0x34, 0xf2, + 0x57, 0xc8, 0x3d, 0x2a, 0x6a, 0x39, 0xcd, 0xd8, 0x14, 0x8f, 0xa7, 0x94, 0x08, 0x3c, 0x17, 0x5a, + 0xe8, 0xba, 0x45, 0x95, 0x58, 0xa4, 0xcb, 0x21, 0x9d, 0x67, 0x13, 0xac, 0xf3, 0x34, 0x72, 0xfe, + 0x43, 0x89, 0x84, 0x09, 0x46, 0x46, 0xdb, 0xf0, 0xfe, 0x39, 0x1e, 0x54, 0x12, 0x2c, 0xee, 0xe9, + 0x2d, 0x47, 0x66, 0xdb, 0xf2, 0x6a, 0xbd, 0x03, 0xbf, 0xb0, 0xda, 0xbf, 0x54, 0x3c, 0x57, 0xd2, + 0x54, 0x44, 0x94, 0x70, 0x64, 0xfd, 0xa0, 0xbc, 0x52, 0xdc, 0x69, 0x40, 0xe5, 0x11, 0x33, 0x2e, + 0x3f, 0x51, 0x49, 0x2d, 0x39, 0x83, 0xfa, 0xe6, 0xb9, 0xa8, 0x2c, 0xff, 0xd7, 0x7a, 0xad, 0xad, + 0x09, 0x23, 0x25, 0xbb, 0xd0, 0x2a, 0xe7, 0x18, 0xec, 0x24, 0x9a, 0x47, 0x72, 0xa3, 0xad, 0x36, + 0xee, 0x6f, 0xdf, 0x96, 0x63, 0xe7, 0x04, 0x6c, 0xbe, 0x20, 0x22, 0x9c, 0xa3, 0x8a, 0x9c, 0x5b, + 0xff, 0xe6, 0xb2, 0x91, 0xc2, 0x9d, 
0x77, 0x03, 0xec, 0xa5, 0x9d, 0xcd, 0x18, 0x10, 0xec, 0x30, + 0xfc, 0x90, 0x61, 0x2e, 0xc6, 0x79, 0x64, 0xe3, 0x8c, 0xc5, 0x32, 0x8f, 0x9c, 0x34, 0x61, 0x77, + 0x45, 0xb8, 0x60, 0x38, 0x4c, 0x22, 0x32, 0x93, 0x01, 0x18, 0x5e, 0x55, 0x23, 0x9e, 0xca, 0x44, + 0xf0, 0xba, 0x4b, 0x3b, 0x76, 0xc1, 0xf9, 0x44, 0xeb, 0xb6, 0xb2, 0x6a, 0xfb, 0x12, 0xa4, 0xfd, + 0x7b, 0x90, 0x7f, 0xf6, 0x75, 0x08, 0x65, 0x9d, 0xc4, 0xa6, 0x2b, 0x59, 0x31, 0x4a, 0x85, 0x76, + 0xd2, 0x1f, 0xc2, 0xde, 0x94, 0x26, 0xc5, 0x11, 0xfd, 0xaa, 0x7c, 0x14, 0x41, 0x5e, 0x04, 0xc6, + 0x8b, 0x61, 0xbc, 0x9a, 0xd6, 0x20, 0xe8, 0xbf, 0x99, 0xad, 0x81, 0x96, 0x05, 0xab, 0x4d, 0x37, + 0x38, 0x8e, 0x87, 0x84, 0x3e, 0x91, 0x6b, 0xe9, 0x94, 0x4f, 0x6c, 0xd5, 0x7f, 0xfa, 0x11, 0x00, + 0x00, 0xff, 0xff, 0x50, 0x26, 0x5d, 0xa4, 0xc4, 0x02, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/duration.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/duration.pb.go new file mode 100644 index 000000000..746d8d012 --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/duration.pb.go @@ -0,0 +1,91 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/duration.proto +// DO NOT EDIT! + +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// Example 1: Compute Duration from two Timestamps in pseudo code. 
+// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +type Duration struct { + // Signed seconds of the span of time. Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (m *Duration) String() string { return proto.CompactTextString(m) } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} + +var fileDescriptor2 = []byte{ + // 155 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x5a, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0xfc, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x42, 0xbc, 0x5c, 0xac, 0x79, 0x89, 0x79, 0xf9, 0xc5, 0x12, + 0x4c, 0x40, 0x2e, 0xab, 0x53, 0x00, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x11, 0x4e, 0xbc, + 0x30, 0x03, 0x02, 0x40, 0x22, 0x01, 0x8c, 0x0b, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, + 0xad, 0x62, 0x92, 0x73, 0x87, 0xa8, 0x0d, 0x80, 0xaa, 0xd5, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0xa9, 0x2c, 0x48, 0x2d, 0x4e, 0x62, 0x03, 0x1b, 0x62, 0x0c, 0x08, 0x00, + 0x00, 0xff, 0xff, 0xb9, 0x0d, 0xae, 0x51, 0xb7, 0x00, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/empty.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/empty.pb.go new file mode 100644 index 000000000..361753492 --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/empty.pb.go @@ -0,0 +1,47 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/empty.proto +// DO NOT EDIT! + +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (m *Empty) String() string { return proto.CompactTextString(m) } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} + +var fileDescriptor3 = []byte{ + // 124 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x00, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x5c, 0xc0, 0xc8, 0xf8, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, + 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xda, 0x00, 0xa8, 0x5a, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, + 0xbc, 0xfc, 0xf2, 0xbc, 0x90, 0xca, 0x82, 0xd4, 0xe2, 0x24, 0x36, 0xb0, 0x21, 0xc6, 0x80, 0x00, + 0x00, 0x00, 0xff, 0xff, 0xac, 0xca, 0x5b, 0xd0, 0x91, 0x00, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/field_mask.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/field_mask.pb.go new file mode 100644 index 000000000..84713c033 --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/field_mask.pb.go @@ -0,0 +1,164 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/field_mask.proto +// DO NOT EDIT! 
+ +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. +// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// field mask. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily applies to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. 
Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). 
+// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +type FieldMask struct { + // The set of field mask paths. + Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (m *FieldMask) String() string { return proto.CompactTextString(m) } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{0} } + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} + +var fileDescriptor4 = []byte{ + // 144 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x49, 0x71, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x02, 0xd5, 0x08, 0xf1, 0x72, 0xb1, 0x16, 0x24, + 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x3a, 0x05, 0x72, 0x09, 0x27, 0xe7, 0xe7, + 0xea, 0xa1, 0x69, 0x71, 0xe2, 0x83, 0x6b, 0x08, 0x00, 0x09, 0x05, 0x30, 0x2e, 0x60, 0x64, 0x5c, + 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x38, 0x00, 0xaa, 0x58, 0x2f, + 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0xa4, 0xb2, 
0x20, 0xb5, 0x38, 0x89, 0x0d, + 0x6c, 0x8a, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0xc7, 0x3e, 0x59, 0xd3, 0xaa, 0x00, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/protobuf.gen.go b/vendor/go.pedge.io/pb/go/google/protobuf/protobuf.gen.go new file mode 100644 index 000000000..f7ee15ab4 --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/protobuf.gen.go @@ -0,0 +1,67 @@ +package google_protobuf + +import ( + "time" +) + +var ( + // EmptyInstance is an instance of Empty. + EmptyInstance = &Empty{} +) + +// Now returns the current time as a protobuf Timestamp. +func Now() *Timestamp { + return TimeToProto(time.Now().UTC()) +} + +// TimeToProto converts a go Time to a protobuf Timestamp. +func TimeToProto(t time.Time) *Timestamp { + return &Timestamp{ + Seconds: t.UnixNano() / int64(time.Second), + Nanos: int32(t.UnixNano() % int64(time.Second)), + } +} + +// GoTime converts a protobuf Timestamp to a go Time. +func (t *Timestamp) GoTime() time.Time { + if t == nil { + return time.Unix(0, 0).UTC() + } + return time.Unix( + t.Seconds, + int64(t.Nanos), + ).UTC() +} + +// Before returns true if t is before j. +func (t *Timestamp) Before(j *Timestamp) bool { + if j == nil { + return false + } + if t == nil { + return true + } + if t.Seconds < j.Seconds { + return true + } + if t.Seconds > j.Seconds { + return false + } + return t.Nanos < j.Nanos +} + +// DurationToProto converts a go Duration to a protobuf Duration. +func DurationToProto(d time.Duration) *Duration { + return &Duration{ + Seconds: int64(d) / int64(time.Second), + Nanos: int32(int64(d) % int64(time.Second)), + } +} + +// GoDuration converts a protobuf Duration to a go Duration. 
+func (d *Duration) GoDuration() time.Duration { + if d == nil { + return 0 + } + return time.Duration((d.Seconds * int64(time.Second)) + int64(d.Nanos)) +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/source_context.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/source_context.pb.go new file mode 100644 index 000000000..75f6aa7bc --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/source_context.pb.go @@ -0,0 +1,45 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/source_context.proto +// DO NOT EDIT! + +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. For example: `"google/protobuf/source.proto"`. 
+ FileName string `protobuf:"bytes,1,opt,name=file_name" json:"file_name,omitempty"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (m *SourceContext) String() string { return proto.CompactTextString(m) } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} + +var fileDescriptor5 = []byte{ + // 153 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 0x54, 0xe9, 0xc1, 0x54, 0x29, 0x29, 0x71, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x09, + 0x72, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x3a, 0x85, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x69, 0x75, 0x12, 0x42, 0xd1, 0x18, + 0x00, 0x12, 0x0e, 0x60, 0x5c, 0xc0, 0xc8, 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, + 0x9c, 0x3b, 0x44, 0x43, 0x00, 0x54, 0x83, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, + 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, 0x24, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x87, 0xa9, 0xa9, 0x25, 0xba, 0x00, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/struct.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/struct.pb.go new file mode 100644 index 000000000..992f1802b --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/struct.pb.go @@ -0,0 +1,353 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/struct.proto +// DO NOT EDIT! 
+ +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. +type NullValue int32 + +const ( + // Null value. + NullValue_NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (x NullValue) String() string { + return proto.EnumName(NullValue_name, int32(x)) +} +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Map of dynamically typed values. 
+ Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (m *Struct) String() string { return proto.CompactTextString(m) } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. + // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (m *Value) String() string { return proto.CompactTextString(m) } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } + +type isValue_Kind interface { + isValue_Kind() +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,oneof"` +} +type Value_ListValue struct { + 
ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NullValue_NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + b.EncodeVarint(1<<3 | proto.WireVarint) + b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + b.EncodeVarint(2<<3 | proto.WireFixed64) + b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + b.EncodeVarint(3<<3 | proto.WireBytes) + b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + b.EncodeVarint(4<<3 | proto.WireVarint) + b.EncodeVarint(t) + case *Value_StructValue: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err 
+ case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. 
+type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (m *ListValue) String() string { return proto.CompactTextString(m) } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} + +var fileDescriptor6 = []byte{ + // 345 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x90, 0xdf, 0x4a, 0x02, 0x41, + 0x14, 0xc6, 0x9d, 0x5d, 0x1d, 0xf2, 0x6c, 0x58, 0x4c, 0x61, 0x62, 0x11, 0x61, 0x14, 0xd2, 0xc5, + 0x1a, 0x7a, 0x13, 0xdd, 0xb5, 0x60, 0x06, 0x2d, 0xb2, 0x50, 0xda, 0xa5, 0xb4, 0x3a, 0xca, 0xe2, + 0x38, 0x23, 0xfb, 0xa7, 0xf0, 0xbe, 0x07, 0x89, 0x2e, 0x7b, 0xb4, 0x9e, 0xa2, 0xd9, 0x19, 0xd7, + 0x42, 0xf1, 0xf2, 0x7c, 0xe7, 0x77, 0xbe, 0xf3, 0x9d, 0x03, 0x27, 0x13, 0x21, 0x26, 0x8c, 0x36, + 0xe6, 0xa1, 0x88, 0x85, 0x9f, 0x8c, 0x1b, 0x51, 0x1c, 0x26, 0xc3, 0xd8, 0x56, 0x35, 0xd9, 0xd3, + 0x5d, 0x3b, 0xeb, 0xd6, 0x3e, 0x10, 0xe0, 0x27, 0x45, 0x90, 0x16, 0xe0, 0x71, 0x40, 0xd9, 0x28, + 0xaa, 0xa0, 0x33, 0xb3, 0x6e, 0x35, 0xcf, 0xed, 0x35, 0xd8, 0xd6, 0xa0, 0x7d, 0xaf, 0xa8, 0x36, + 0x8f, 0xc3, 0x45, 0xb5, 0x0d, 0xd6, 0xbf, 0x92, 0x58, 0x60, 0x4e, 0xe9, 0x42, 0x1a, 0xa0, 0x7a, + 0x91, 0x5c, 0x40, 0xe1, 0xed, 0x95, 0x25, 0xb4, 0x62, 0xc8, 0xd2, 0x6a, 0x96, 0x37, 0xfc, 0xfa, + 0x69, 0xf7, 0xd6, 0xb8, 0x41, 0xb5, 0x1f, 0x04, 0x05, 0x55, 0x91, 0x6b, 0x00, 0x9e, 0x30, 0x36, + 0xd0, 0x93, 0xa9, 0x51, 0xa9, 0x59, 
0xdd, 0x98, 0xec, 0x4a, 0x44, 0xf1, 0x0f, 0x39, 0x52, 0x86, + 0x5d, 0x9e, 0xcc, 0x7c, 0x1a, 0x0e, 0xfe, 0xb6, 0x21, 0xad, 0xcb, 0xdb, 0x03, 0x3e, 0x59, 0xea, + 0x66, 0x1a, 0x4a, 0xea, 0x87, 0x00, 0xbe, 0x10, 0xd9, 0x86, 0xbc, 0x54, 0x77, 0xa4, 0xda, 0x50, + 0xb4, 0x3c, 0x6f, 0xa9, 0x17, 0x54, 0xe6, 0xa3, 0x2d, 0x3f, 0x90, 0x03, 0x32, 0x28, 0x0b, 0xa2, + 0x0c, 0xc7, 0x0a, 0xdf, 0x0c, 0xea, 0x4a, 0x64, 0x19, 0xd4, 0xc1, 0x90, 0x9f, 0x06, 0x7c, 0x54, + 0x6b, 0x41, 0x71, 0x25, 0x93, 0x4b, 0xc0, 0xca, 0x21, 0xfb, 0xfa, 0x96, 0x2f, 0x5d, 0x1d, 0x43, + 0x71, 0x75, 0x34, 0x29, 0x01, 0x74, 0x7b, 0xae, 0x3b, 0xe8, 0xdf, 0xb9, 0xbd, 0xf6, 0x7e, 0xce, + 0xe9, 0xc2, 0xc1, 0x50, 0xcc, 0xd6, 0x27, 0x1d, 0x4b, 0x87, 0xf5, 0xd2, 0xda, 0x43, 0x9f, 0x08, + 0x7d, 0x19, 0x66, 0xc7, 0x73, 0xbe, 0x8d, 0xd3, 0x8e, 0x26, 0xbd, 0x6c, 0xc7, 0x0b, 0x65, 0xec, + 0x91, 0x8b, 0x77, 0xfe, 0xbc, 0x98, 0xd3, 0xc8, 0xc7, 0xca, 0xa2, 0xf5, 0x1b, 0x00, 0x00, 0xff, + 0xff, 0x64, 0xdf, 0xd2, 0xb1, 0x4d, 0x02, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/timestamp.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/timestamp.pb.go new file mode 100644 index 000000000..eea79513f --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/timestamp.pb.go @@ -0,0 +1,104 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/timestamp.proto +// DO NOT EDIT! + +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. 
It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// now = time.time() +// seconds = int(now) +// nanos = int((now - seconds) * 10**9) +// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. 
+ Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (m *Timestamp) String() string { return proto.CompactTextString(m) } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor7, []int{0} } + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} + +var fileDescriptor7 = []byte{ + // 160 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x69, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0xf1, 0x73, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x0b, 0xf1, 0x72, 0xb1, 0xe6, 0x25, 0xe6, 0xe5, 0x17, + 0x4b, 0x30, 0x01, 0xb9, 0xac, 0x4e, 0x21, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x66, 0x38, + 0xf1, 0xc1, 0x4d, 0x08, 0x00, 0x09, 0x05, 0x30, 0x2e, 0x60, 0x64, 0xfc, 0xc1, 0xc8, 0xb8, 0x88, + 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x48, 0x65, 0x41, 0x6a, 0x71, 0x12, 0x1b, 0xd8, + 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xd6, 0xc4, 0x74, 0x0b, 0xbd, 0x00, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/type.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/type.pb.go new file mode 100644 index 000000000..56dea14ab --- /dev/null +++ 
b/vendor/go.pedge.io/pb/go/google/protobuf/type.pb.go @@ -0,0 +1,385 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/type.proto +// DO NOT EDIT! + +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + Syntax_SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. + Syntax_SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (x Syntax) String() string { + return proto.EnumName(Syntax_name, int32(x)) +} +func (Syntax) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + Field_TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + Field_TYPE_DOUBLE Field_Kind = 1 + // Field type float. + Field_TYPE_FLOAT Field_Kind = 2 + // Field type int64. + Field_TYPE_INT64 Field_Kind = 3 + // Field type uint64. + Field_TYPE_UINT64 Field_Kind = 4 + // Field type int32. + Field_TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + Field_TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + Field_TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + Field_TYPE_BOOL Field_Kind = 8 + // Field type string. + Field_TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + Field_TYPE_GROUP Field_Kind = 10 + // Field type message. + Field_TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + Field_TYPE_BYTES Field_Kind = 12 + // Field type uint32. + Field_TYPE_UINT32 Field_Kind = 13 + // Field type enum. + Field_TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. 
+ Field_TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + Field_TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + Field_TYPE_SINT32 Field_Kind = 17 + // Field type sint64. + Field_TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (x Field_Kind) String() string { + return proto.EnumName(Field_Kind_name, int32(x)) +} +func (Field_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{1, 0} } + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + Field_CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + Field_CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + Field_CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. 
+ Field_CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (x Field_Cardinality) String() string { + return proto.EnumName(Field_Cardinality_name, int32(x)) +} +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { return fileDescriptor8, []int{1, 1} } + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Type) Reset() { *m = Type{} } +func (m *Type) String() string { return proto.CompactTextString(m) } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{0} } + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +// A single field of a message type. 
+type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number" json:"number,omitempty"` + // The field name. + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. + DefaultValue string `protobuf:"bytes,11,opt,name=default_value" json:"default_value,omitempty"` +} + +func (m *Field) Reset() { *m = Field{} } +func (m *Field) String() string { return proto.CompactTextString(m) } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{1} } + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +// Enum type definition. +type Enum struct { + // Enum type name. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,5,opt,name=syntax,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (m *Enum) String() string { return proto.CompactTextString(m) } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{2} } + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number" json:"number,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (m *EnumValue) String() string { return proto.CompactTextString(m) } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{3} } + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. 
For example, `"java_package"`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The option's value. For example, `"com.google.protobuf"`. + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Option) Reset() { *m = Option{} } +func (m *Option) String() string { return proto.CompactTextString(m) } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { return fileDescriptor8, []int{4} } + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) +} + +var fileDescriptor8 = []byte{ + // 712 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x54, 0xc1, 0x6e, 0xda, 0x4a, + 0x14, 0x7d, 0x06, 0xe3, 0xe0, 0xeb, 0x40, 0x26, 0x4e, 0xde, 0x8b, 0x1f, 0x4f, 0x8a, 0x22, 0xbf, + 0xaa, 0xa5, 0x95, 0x4a, 0x24, 0x52, 0xa5, 0x8b, 0xae, 0x4c, 0x70, 0x90, 0x15, 0x62, 0xbb, 0xd8, + 0x34, 0x61, 0x65, 0x39, 0x30, 0x44, 0x34, 0x8e, 0x8d, 0xb0, 0x69, 0xc3, 0xdf, 0x54, 0x5d, 0xf6, + 0x4b, 0xba, 0xea, 0xae, 0xea, 0xef, 0x74, 0x3c, 0x2e, 0x06, 0x4c, 0x1a, 0x35, 0x3b, 0xe6, 0x9e, + 0x73, 0xef, 0x39, 0x73, 0x3c, 0x17, 0xa8, 0x5c, 0x07, 0xc1, 0xb5, 0x87, 0x0f, 0xc7, 0x93, 0x20, + 0x0a, 0xae, 0xa6, 0xc3, 0xc3, 0x68, 0x36, 0xc6, 0x35, 0x7a, 0x12, 0xb7, 0x12, 0xac, 0x36, 0xc7, + 0x2a, 0xff, 0x66, 0xc9, 0xae, 0x3f, 0x4b, 0xd0, 0xca, 0x93, 0x2c, 
0x14, 0x06, 0xd3, 0x49, 0x1f, + 0x3b, 0xfd, 0xc0, 0x8f, 0xf0, 0x5d, 0x94, 0xb0, 0xe4, 0x1f, 0x0c, 0xb0, 0x36, 0x11, 0x10, 0x37, + 0x81, 0xf5, 0xdd, 0x5b, 0x2c, 0x31, 0x07, 0x4c, 0x95, 0x17, 0x9f, 0x02, 0x37, 0x1c, 0x61, 0x6f, + 0x10, 0x4a, 0xb9, 0x83, 0x7c, 0x55, 0xa8, 0xff, 0x53, 0xcb, 0x28, 0xd7, 0x4e, 0x63, 0x58, 0x2c, + 0x03, 0x17, 0xf8, 0x38, 0x18, 0x86, 0x52, 0x9e, 0xf0, 0x78, 0xb1, 0x0a, 0x1b, 0xc1, 0x38, 0x1a, + 0x05, 0x7e, 0x28, 0xb1, 0xb4, 0x71, 0x6f, 0xad, 0xd1, 0xa0, 0xb8, 0x78, 0x0c, 0xe5, 0x55, 0x43, + 0x52, 0x81, 0x28, 0x0b, 0xf5, 0xfd, 0xb5, 0x06, 0x8b, 0xd2, 0x4e, 0x12, 0x96, 0xf8, 0x0c, 0xb8, + 0x70, 0xe6, 0x47, 0xee, 0x9d, 0xc4, 0x11, 0x7e, 0xf9, 0x1e, 0x01, 0x8b, 0xc2, 0xf2, 0xf7, 0x02, + 0x14, 0x12, 0x93, 0xcf, 0x81, 0xbd, 0x19, 0xf9, 0x03, 0x7a, 0xb5, 0x72, 0xfd, 0xbf, 0xfb, 0xaf, + 0x52, 0x3b, 0x23, 0x14, 0xf1, 0x35, 0x08, 0x7d, 0x77, 0x32, 0x18, 0xf9, 0xae, 0x37, 0x8a, 0x66, + 0xe4, 0xf2, 0x71, 0x87, 0xfc, 0x9b, 0x8e, 0x93, 0x05, 0x33, 0x0e, 0xc2, 0x9f, 0xde, 0x5e, 0xe1, + 0x09, 0x09, 0x82, 0xa9, 0x16, 0xd2, 0x38, 0x59, 0x1a, 0x27, 0x82, 0x62, 0xfc, 0x15, 0x9d, 0xe9, + 0xc4, 0xa3, 0xb6, 0x79, 0x71, 0x07, 0x04, 0x1a, 0x9c, 0x43, 0x54, 0xf1, 0x9d, 0xb4, 0x41, 0x9b, + 0xc8, 0x90, 0xb1, 0xdb, 0xbf, 0xc1, 0x03, 0xa9, 0x48, 0xce, 0xc5, 0xe5, 0x34, 0xf9, 0x87, 0xd3, + 0xdc, 0x06, 0xfe, 0x7d, 0x18, 0xf8, 0x0e, 0xd5, 0x04, 0xaa, 0xf0, 0x37, 0x94, 0x06, 0x78, 0xe8, + 0x4e, 0xbd, 0xc8, 0xf9, 0xe0, 0x7a, 0x53, 0x2c, 0x09, 0x71, 0x59, 0xfe, 0x9a, 0x03, 0x96, 0x5e, + 0x15, 0xc1, 0xa6, 0xdd, 0x33, 0x55, 0xa7, 0xab, 0x9f, 0xe9, 0xc6, 0x85, 0x8e, 0xfe, 0x12, 0xb7, + 0x40, 0xa0, 0x95, 0xa6, 0xd1, 0x6d, 0xb4, 0x55, 0xc4, 0x10, 0x3f, 0x40, 0x0b, 0xa7, 0x6d, 0x43, + 0xb1, 0x51, 0x2e, 0x3d, 0x6b, 0xba, 0x7d, 0xfc, 0x0a, 0xe5, 0xd3, 0x86, 0x6e, 0x52, 0x60, 0x97, + 0x09, 0x47, 0x75, 0x54, 0x48, 0x35, 0x4e, 0xb5, 0x4b, 0xb5, 0x49, 0x18, 0xdc, 0x6a, 0x85, 0x70, + 0x36, 0xc4, 0x12, 0xf0, 0xb4, 0xd2, 0x30, 0x8c, 0x36, 0x2a, 0xa6, 0x33, 0x2d, 0xbb, 0xa3, 0xe9, + 0x2d, 
0xc4, 0xa7, 0x33, 0x5b, 0x1d, 0xa3, 0x6b, 0x22, 0x48, 0x27, 0x9c, 0xab, 0x96, 0xa5, 0xb4, + 0x54, 0x24, 0xa4, 0x8c, 0x46, 0xcf, 0x56, 0x2d, 0xb4, 0xb9, 0x62, 0x8b, 0x48, 0x94, 0x52, 0x09, + 0x55, 0xef, 0x9e, 0xa3, 0x32, 0x09, 0xab, 0x94, 0x48, 0xcc, 0x4d, 0x6c, 0x65, 0x4a, 0xc4, 0x29, + 0x5a, 0x18, 0x49, 0xa6, 0x6c, 0xaf, 0x14, 0x08, 0x43, 0x94, 0x23, 0x10, 0x96, 0x9f, 0xc0, 0x1e, + 0xec, 0x9c, 0x28, 0x9d, 0xa6, 0xa6, 0x2b, 0x6d, 0xcd, 0xee, 0x2d, 0xe5, 0x2a, 0xc1, 0xee, 0x32, + 0x60, 0x98, 0xb6, 0x66, 0x90, 0xdf, 0x24, 0xe0, 0x0c, 0xd2, 0x51, 0xdf, 0x76, 0xb5, 0x8e, 0xda, + 0x24, 0x51, 0xaf, 0x21, 0xa6, 0xaa, 0xd8, 0x04, 0xc9, 0xcb, 0xdf, 0xc8, 0xc6, 0xaa, 0xe4, 0xad, + 0x65, 0x36, 0xf6, 0x25, 0xf0, 0x98, 0x54, 0x93, 0x4f, 0x9d, 0x2c, 0x6d, 0x65, 0xed, 0xb5, 0xc4, + 0x7d, 0xef, 0x62, 0xc6, 0xf2, 0xd3, 0xca, 0x3f, 0x76, 0x51, 0xd9, 0x47, 0x2e, 0x6a, 0xe1, 0xe1, + 0x45, 0xb5, 0x80, 0x5f, 0xf8, 0x5a, 0xbd, 0xd4, 0x62, 0xab, 0x72, 0x74, 0x41, 0xfe, 0xd8, 0xb5, + 0xfc, 0x06, 0xb8, 0x5f, 0xfe, 0x57, 0x27, 0xfe, 0x0f, 0x85, 0x79, 0x44, 0xf1, 0x25, 0x76, 0xd7, + 0xfa, 0x15, 0x7f, 0xf6, 0xa2, 0x06, 0x5c, 0xe2, 0x2d, 0x7e, 0x17, 0x56, 0x4f, 0xb7, 0x95, 0x4b, + 0xc7, 0xec, 0x18, 0xb6, 0x51, 0x27, 0x5f, 0x33, 0x53, 0x3a, 0x42, 0x4c, 0xa3, 0x0d, 0x3b, 0xfd, + 0xe0, 0x36, 0x3b, 0xaa, 0xc1, 0xc7, 0x7f, 0xac, 0x66, 0x7c, 0x32, 0x99, 0x4f, 0x0c, 0xf3, 0x39, + 0x97, 0x6f, 0x99, 0x8d, 0x2f, 0xb9, 0xfd, 0x56, 0xc2, 0x33, 0xe7, 0x92, 0x17, 0xd8, 0xf3, 0xce, + 0xfc, 0xe0, 0xa3, 0x1f, 0xf3, 0xc3, 0x2b, 0x8e, 0x0e, 0x38, 0xfa, 0x19, 0x00, 0x00, 0xff, 0xff, + 0x18, 0x41, 0x99, 0xa0, 0x09, 0x06, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/pb/go/google/protobuf/wrappers.pb.go b/vendor/go.pedge.io/pb/go/google/protobuf/wrappers.pb.go new file mode 100644 index 000000000..88a53b182 --- /dev/null +++ b/vendor/go.pedge.io/pb/go/google/protobuf/wrappers.pb.go @@ -0,0 +1,161 @@ +// Code generated by protoc-gen-go. +// source: google/protobuf/wrappers.proto +// DO NOT EDIT! 
+ +package google_protobuf + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (m *DoubleValue) String() string { return proto.CompactTextString(m) } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{0} } + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (m *FloatValue) String() string { return proto.CompactTextString(m) } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{1} } + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. + Value int64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (m *Int64Value) String() string { return proto.CompactTextString(m) } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{2} } + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. 
+ Value uint64 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (m *UInt64Value) String() string { return proto.CompactTextString(m) } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{3} } + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (m *Int32Value) String() string { return proto.CompactTextString(m) } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{4} } + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. + Value uint32 `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (m *UInt32Value) String() string { return proto.CompactTextString(m) } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{5} } + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (m *BoolValue) String() string { return proto.CompactTextString(m) } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{6} } + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. 
+ Value string `protobuf:"bytes,1,opt,name=value" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (m *StringValue) String() string { return proto.CompactTextString(m) } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{7} } + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (m *BytesValue) String() string { return proto.CompactTextString(m) } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor9, []int{8} } + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} + +var fileDescriptor9 = []byte{ + // 223 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x32, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0xbc, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 
0x02, 0xa3, 0x06, 0xa3, 0x92, 0x34, 0x17, 0x97, 0x5b, 0x4e, + 0x7e, 0x62, 0x09, 0x16, 0x49, 0x26, 0x90, 0xa4, 0x67, 0x5e, 0x89, 0x99, 0x09, 0x16, 0x49, 0x66, + 0x90, 0xb9, 0xa1, 0xb8, 0x64, 0x59, 0xa0, 0x5a, 0x8d, 0x8d, 0xb0, 0x48, 0xb2, 0xc2, 0xb4, 0x62, + 0x95, 0xe5, 0x55, 0x92, 0xe2, 0xe2, 0x74, 0xca, 0xcf, 0xcf, 0xc1, 0x22, 0xc7, 0x01, 0xd2, 0x19, + 0x5c, 0x52, 0x94, 0x99, 0x97, 0x8e, 0x45, 0x96, 0x13, 0x64, 0xa9, 0x53, 0x65, 0x49, 0x6a, 0x31, + 0x16, 0x49, 0x1e, 0xa7, 0x60, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0xb4, 0xe0, 0x71, 0xe2, 0x0d, + 0x87, 0x86, 0x5f, 0x00, 0x48, 0x24, 0x80, 0x71, 0x01, 0x23, 0xe3, 0x0f, 0x46, 0xc6, 0x45, 0x4c, + 0xcc, 0xee, 0x01, 0x4e, 0xab, 0x98, 0xe4, 0xdc, 0x21, 0xca, 0x03, 0xa0, 0xca, 0xf5, 0xc2, 0x53, + 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x2a, 0x0b, 0x52, 0x8b, 0x93, 0xd8, 0xc0, 0xe6, + 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0xd7, 0x8e, 0x50, 0xd1, 0x96, 0x01, 0x00, 0x00, +} diff --git a/vendor/go.pedge.io/proto/time/prototime.go b/vendor/go.pedge.io/proto/time/prototime.go new file mode 100644 index 000000000..b6e768236 --- /dev/null +++ b/vendor/go.pedge.io/proto/time/prototime.go @@ -0,0 +1,64 @@ +package prototime // import "go.pedge.io/proto/time" + +import ( + "time" + + "go.pedge.io/pb/go/google/protobuf" +) + +// TimeToTimestamp converts a go Time to a protobuf Timestamp. +func TimeToTimestamp(t time.Time) *google_protobuf.Timestamp { + return &google_protobuf.Timestamp{ + Seconds: t.UnixNano() / int64(time.Second), + Nanos: int32(t.UnixNano() % int64(time.Second)), + } +} + +// TimestampToTime converts a protobuf Timestamp to a go Time. +func TimestampToTime(timestamp *google_protobuf.Timestamp) time.Time { + if timestamp == nil { + return time.Unix(0, 0).UTC() + } + return time.Unix( + timestamp.Seconds, + int64(timestamp.Nanos), + ).UTC() +} + +// TimestampLess returns true if i is before j. 
+func TimestampLess(i *google_protobuf.Timestamp, j *google_protobuf.Timestamp) bool { + if j == nil { + return false + } + if i == nil { + return true + } + if i.Seconds < j.Seconds { + return true + } + if i.Seconds > j.Seconds { + return false + } + return i.Nanos < j.Nanos +} + +// Now returns the current time as a protobuf Timestamp. +func Now() *google_protobuf.Timestamp { + return TimeToTimestamp(time.Now().UTC()) +} + +// DurationToProto converts a go Duration to a protobuf Duration. +func DurationToProto(d time.Duration) *google_protobuf.Duration { + return &google_protobuf.Duration{ + Seconds: int64(d) / int64(time.Second), + Nanos: int32(int64(d) % int64(time.Second)), + } +} + +// DurationFromProto converts a protobuf Duration to a go Duration. +func DurationFromProto(duration *google_protobuf.Duration) time.Duration { + if duration == nil { + return 0 + } + return time.Duration((duration.Seconds * int64(time.Second)) + int64(duration.Nanos)) +} diff --git a/vendor/vendor.json b/vendor/vendor.json index 97b538aae..6f0ffa5d1 100644 --- a/vendor/vendor.json +++ b/vendor/vendor.json @@ -4,143 +4,143 @@ "package": [ { "path": "bazil.org/fuse", - "revision": "a8bc3b86317dc95ed28fdefcebc1dbce8baa88e9", - "revisionTime": "2015-12-02T10:18:56-08:00" + "revision": "2345f52f9d579e93916093ea4f19e29a79dd5050", + "revisionTime": "2016-01-08T15:59:59-08:00" }, { "path": "bazil.org/fuse/fs", - "revision": "a8bc3b86317dc95ed28fdefcebc1dbce8baa88e9", - "revisionTime": "2015-12-02T10:18:56-08:00" + "revision": "2345f52f9d579e93916093ea4f19e29a79dd5050", + "revisionTime": "2016-01-08T15:59:59-08:00" }, { "path": "bazil.org/fuse/fuseutil", - "revision": "a8bc3b86317dc95ed28fdefcebc1dbce8baa88e9", - "revisionTime": "2015-12-02T10:18:56-08:00" + "revision": "2345f52f9d579e93916093ea4f19e29a79dd5050", + "revisionTime": "2016-01-08T15:59:59-08:00" }, { "path": "github.com/Sirupsen/logrus", - "revision": "446d1c146faa8ed3f4218f056fcd165f6bcfda81", - "revisionTime": 
"2015-12-04T09:14:43-05:00" + "revision": "f7f79f729e0fbe2fcc061db48a9ba0263f588252", + "revisionTime": "2016-01-18T19:00:32-05:00" }, { "path": "github.com/aws/aws-sdk-go/aws", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/awserr", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/awsutil", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/client", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/client/metadata", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/corehandlers", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/credentials", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + 
"revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/defaults", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/ec2metadata", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/request", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/aws/session", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/private/endpoints", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/private/protocol/ec2query", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": 
"github.com/aws/aws-sdk-go/private/protocol/json/jsonutil", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/private/protocol/jsonrpc", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/private/protocol/query/queryutil", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/private/protocol/rest", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/private/protocol/xml/xmlutil", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/private/signer/v4", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/private/waiter", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": 
"github.com/aws/aws-sdk-go/service/ec2", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/aws/aws-sdk-go/service/opsworks", - "revision": "80dd4951fdb3f711d31843b8d87871130ef2df67", - "revisionTime": "2015-12-18T17:19:25-08:00" + "revision": "87b1e60a50b09e4812dee560b33a238f67305804", + "revisionTime": "2016-01-21T14:17:43-08:00" }, { "path": "github.com/codegangsta/cli", - "revision": "b5232bb2934f606f9f27a1305f1eea224e8e8b88", - "revisionTime": "2015-12-12T19:52:18-05:00" + "revision": "f9cc3001e04f9783cb4ad08ca6791aa07134787c", + "revisionTime": "2016-01-21T21:44:38-08:00" }, { "path": "github.com/coreos/go-etcd/etcd", @@ -148,205 +148,201 @@ "revisionTime": "2015-10-26T09:03:18-07:00" }, { + "origin": "github.com/stretchr/testify/vendor/github.com/davecgh/go-spew/spew", "path": "github.com/davecgh/go-spew/spew", "revision": "5215b55f46b2b919f50a1df0eaa5886afe4e3b3d", "revisionTime": "2015-11-05T15:09:06-06:00" }, { "path": "github.com/docker/docker/daemon/graphdriver", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/daemon/graphdriver/btrfs", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/daemon/graphdriver/overlay", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": 
"2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/archive", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/chrootarchive", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/directory", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/fileutils", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/idtools", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/ioutils", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/longpath", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/mount", - "revision": 
"67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/parsers", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/plugins", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/pools", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/promise", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/reexec", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" - }, - { - "path": "github.com/docker/docker/pkg/sockets", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/docker/pkg/system", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - "revisionTime": "2015-12-21T14:01:27+01:00" - }, - { - "path": "github.com/docker/docker/pkg/tlsconfig", - "revision": "67620bc0288ae7ecb12511879e6eb605e98594a1", - 
"revisionTime": "2015-12-21T14:01:27+01:00" + "revision": "c91045a78bf138aa32acd215fed4084d3b912bec", + "revisionTime": "2016-01-22T11:18:53+01:00" }, { "path": "github.com/docker/go-units", - "revision": "651fc226e7441360384da338d0fd37f2440ffbe3", - "revisionTime": "2015-12-18T14:21:30-08:00" + "revision": "0bbddae09c5a5419a8c6dcdd7ff90da3d450393b", + "revisionTime": "2015-12-30T09:58:59-08:00" }, { "path": "github.com/fsouza/go-dockerclient", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": 
"github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" - }, - { - "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/parsers", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": 
"2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { - "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ulimit", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" - }, - { - "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/units", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "path": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units", + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "path": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user", - "revision": "e0d22d30691bcc996eca51f729a4777b8c7dc2a8", - "revisionTime": "2015-12-21T10:00:12-02:00" + "revision": 
"296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" + }, + { + "path": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context", + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" + }, + { + "path": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix", + "revision": "296e36969d9d7606c4a6cbcd38eced50a39a0fd1", + "revisionTime": "2016-01-21T18:29:44-05:00" }, { "origin": "github.com/aws/aws-sdk-go/vendor/github.com/go-ini/ini", "path": "github.com/go-ini/ini", - "revision": "6ec4abd8f8d587536da56f730858f0e27aeb4126", - "revisionTime": "2015-12-17T22:34:33-05:00" + "revision": "afbd495e5aaea13597b5e14fe514ddeaa4d76fc3", + "revisionTime": "2016-01-06T18:56:16+08:00" + }, + { + "path": "github.com/golang/protobuf/proto", + "revision": "5fc2294e655b78ed8a02082d37808d46c17d7e64", + "revisionTime": "2016-01-21T14:29:00+11:00" }, { "path": "github.com/gorilla/context", @@ -355,13 +351,13 @@ }, { "path": "github.com/gorilla/mux", - "revision": "9c068cf16d982f8bd444b8c352acbeec34c4fe5b", - "revisionTime": "2015-11-11T18:35:30+08:00" + "revision": "26a6070f849969ba72b72256e9f14cf519751690", + "revisionTime": "2015-12-31T08:19:08-08:00" }, { "path": "github.com/hashicorp/consul/api", - "revision": "4e0b8f2e1adc3773c00bca9f2bf118f33958ac72", - "revisionTime": "2015-12-18T22:54:58-08:00" + "revision": "c353aa9260b695987d45e4e975be52f60da348ed", + "revisionTime": "2016-01-20T16:59:46-08:00" }, { "path": "github.com/hashicorp/go-cleanhttp", @@ -370,8 +366,8 @@ }, { "path": "github.com/hashicorp/serf/coordinate", - "revision": "11bb88abf7b17f0b794b51416a9107d781e95f35", - "revisionTime": "2015-12-18T20:49:27-08:00" + "revision": "64d10e9428bd70dbcd831ad087573b66731c014b", + "revisionTime": "2016-01-19T11:37:02-08:00" }, { "origin": 
"github.com/aws/aws-sdk-go/vendor/github.com/jmespath/go-jmespath", @@ -396,23 +392,18 @@ }, { "path": "github.com/opencontainers/runc/libcontainer/label", - "revision": "d97d5e8b007e4657316eed76ea30bc0f690230cf", - "revisionTime": "2015-12-19T07:55:24-08:00" + "revision": "4e6893b05a6aa723daa5f9194361032c6beaca25", + "revisionTime": "2016-01-22T09:50:38+08:00" }, { "path": "github.com/opencontainers/runc/libcontainer/selinux", - "revision": "d97d5e8b007e4657316eed76ea30bc0f690230cf", - "revisionTime": "2015-12-19T07:55:24-08:00" + "revision": "4e6893b05a6aa723daa5f9194361032c6beaca25", + "revisionTime": "2016-01-22T09:50:38+08:00" }, { "path": "github.com/opencontainers/runc/libcontainer/system", - "revision": "d97d5e8b007e4657316eed76ea30bc0f690230cf", - "revisionTime": "2015-12-19T07:55:24-08:00" - }, - { - "path": "github.com/opencontainers/runc/libcontainer/user", - "revision": "d97d5e8b007e4657316eed76ea30bc0f690230cf", - "revisionTime": "2015-12-19T07:55:24-08:00" + "revision": "4e6893b05a6aa723daa5f9194361032c6beaca25", + "revisionTime": "2016-01-22T09:50:38+08:00" }, { "path": "github.com/pborman/uuid", @@ -420,9 +411,10 @@ "revisionTime": "2015-12-15T07:05:54-08:00" }, { + "origin": "github.com/stretchr/testify/vendor/github.com/pmezard/go-difflib/difflib", "path": "github.com/pmezard/go-difflib/difflib", - "revision": "e8554b8641db39598be7f6342874b958f12ae1d4", - "revisionTime": "2015-12-07T19:24:34+01:00" + "revision": "792786c7400a136282c1664665ae0a8db921c6c2", + "revisionTime": "2016-01-10T11:55:54+01:00" }, { "path": "github.com/portworx/kvdb", @@ -451,23 +443,43 @@ }, { "path": "github.com/stretchr/testify/assert", - "revision": "e3a8ff8ce36581f87a15341206f205b1da467059", - "revisionTime": "2015-12-07T16:24:04-08:00" + "revision": "f390dcf405f7b83c997eac1b06768bb9f44dec18", + "revisionTime": "2016-01-09T21:38:47+01:00" }, { "path": 
"github.com/stretchr/testify/require", - "revision": "e3a8ff8ce36581f87a15341206f205b1da467059", - "revisionTime": "2015-12-07T16:24:04-08:00" + "revision": "f390dcf405f7b83c997eac1b06768bb9f44dec18", + "revisionTime": "2016-01-09T21:38:47+01:00" }, { "path": "github.com/ugorji/go/codec", "revision": "646ae4a518c1c3be0739df898118d9bccf993858", "revisionTime": "2015-12-18T14:34:38-05:00" }, + { + "path": "go.pedge.io/dlog", + "revision": "7484f3b321a2704e7a39c5eb75002643e18b2415", + "revisionTime": "2016-01-22T13:24:06-05:00" + }, + { + "path": "go.pedge.io/dlog/logrus", + "revision": "7484f3b321a2704e7a39c5eb75002643e18b2415", + "revisionTime": "2016-01-22T13:24:06-05:00" + }, + { + "path": "go.pedge.io/pb/go/google/protobuf", + "revision": "b1918bb6ce22548082eede5d04eb1618c03be709", + "revisionTime": "2016-01-22T00:13:50-05:00" + }, + { + "path": "go.pedge.io/proto/time", + "revision": "ff06a6e997a8616d58dceb168f5b6aa81be63f4a", + "revisionTime": "2016-01-21T20:53:50-05:00" + }, { "path": "golang.org/x/net/context", - "revision": "6c89489cafabcbc76df9dbf84ebf07204673fecf", - "revisionTime": "2015-12-19T16:51:57+06:00" + "revision": "2e9cee70ee697e0a2ef894b560dda50dec7dff58", + "revisionTime": "2016-01-21T16:20:05Z" }, { "path": "gopkg.in/jmcvetta/napping.v3", diff --git a/volume/blockdriver.go b/volume/blockdriver.go index 4962d5335..828c62ed1 100644 --- a/volume/blockdriver.go +++ b/volume/blockdriver.go @@ -1,19 +1,14 @@ package volume -import ( - "github.com/libopenstorage/openstorage/api" -) - // DefaultBlockDriver is a default (null) block driver implementation. This can be // used by drivers that do not want to (or care about) implementing the attach, // format and detach interfaces. 
-type DefaultBlockDriver struct { -} +type DefaultBlockDriver struct {} -func (d *DefaultBlockDriver) Attach(volumeID api.VolumeID) (path string, err error) { +func (d *DefaultBlockDriver) Attach(volumeID string) (string, error) { return "", ErrNotSupported } -func (d *DefaultBlockDriver) Detach(volumeID api.VolumeID) error { +func (d *DefaultBlockDriver) Detach(volumeID string) error { return ErrNotSupported } diff --git a/volume/drivers/aws/aws.go b/volume/drivers/aws/aws.go index e74effa8b..66f4eeca5 100644 --- a/volume/drivers/aws/aws.go +++ b/volume/drivers/aws/aws.go @@ -10,7 +10,9 @@ import ( "syscall" "time" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "go.pedge.io/proto/time" + "github.com/aws/aws-sdk-go/aws" "github.com/aws/aws-sdk-go/aws/credentials" "github.com/aws/aws-sdk-go/aws/session" @@ -20,12 +22,13 @@ import ( "github.com/libopenstorage/openstorage/pkg/chaos" "github.com/libopenstorage/openstorage/pkg/device" "github.com/libopenstorage/openstorage/volume" + "github.com/libopenstorage/openstorage/volume/drivers/common" "github.com/portworx/kvdb" ) const ( Name = "aws" - Type = api.Block + Type = api.DriverType_DRIVER_TYPE_BLOCK AwsDBKey = "OpenStorageAWSKey" ) @@ -59,7 +62,7 @@ func Init(params volume.DriverParams) (volume.VolumeDriver, error) { if err != nil { return nil, err } - logrus.Infof("AWS instance %v zone %v", instance, zone) + dlog.Infof("AWS instance %v zone %v", instance, zone) accessKey, ok := params["AWS_ACCESS_KEY_ID"] if !ok { if accessKey = os.Getenv("AWS_ACCESS_KEY_ID"); accessKey == "" { @@ -142,40 +145,33 @@ func (d *Driver) freeDevices() (string, string, error) { } // mapCos translates a CoS specified in spec to a volume. 
-func mapCos(cos api.VolumeCos) (*int64, *string) { - volType := opsworks.VolumeTypeIo1 - if cos < 2 { - // General purpose SSDs don't have provisioned IOPS - volType = opsworks.VolumeTypeGp2 - return nil, &volType - } - // AWS provisioned IOPS range is 100 - 20000. +func mapCos(cos uint32) (*int64, *string) { var iops int64 - if cos < 7 { - iops = 10000 - } else { - iops = 20000 + var volType string + switch { + case cos < 2: + iops, volType = 0, opsworks.VolumeTypeGp2 + case cos < 7: + iops, volType = 10000, opsworks.VolumeTypeIo1 + default: + iops, volType = 20000, opsworks.VolumeTypeIo1 } return &iops, &volType } // metadata retrieves instance metadata specified by key. func metadata(key string) (string, error) { - client := http.Client{Timeout: time.Second * 10} url := "http://169.254.169.254/latest/meta-data/" + key - res, err := client.Get(url) if err != nil { return "", err } defer res.Body.Close() - if res.StatusCode != 200 { err = fmt.Errorf("Code %d returned for url %s", res.StatusCode, url) return "", fmt.Errorf("Error querying AWS metadata for key %s: %v", key, err) } - body, err := ioutil.ReadAll(res.Body) if err != nil { return "", fmt.Errorf("Error querying AWS metadata for key %s: %v", key, err) @@ -183,7 +179,6 @@ func metadata(key string) (string, error) { if len(body) == 0 { return "", fmt.Errorf("Failed to retrieve AWS metadata for key %s: %v", key, err) } - return string(body), nil } @@ -224,12 +219,11 @@ func (v *Driver) Status() [][2]string { // Create aws volume from spec. func (d *Driver) Create( - locator api.VolumeLocator, + locator *api.VolumeLocator, source *api.Source, - spec *api.VolumeSpec) (api.VolumeID, error) { - + spec *api.VolumeSpec, +) (string, error) { var snapID *string - // Spec size is in bytes, translate to GiB. 
sz := int64(spec.Size / (1024 * 1024 * 1024)) iops, volType := mapCos(spec.Cos) @@ -239,7 +233,6 @@ func (d *Driver) Create( } dryRun := false encrypted := false - req := &ec2.CreateVolumeInput{ AvailabilityZone: &d.md.zone, DryRun: &dryRun, @@ -249,51 +242,50 @@ func (d *Driver) Create( VolumeType: volType, SnapshotId: snapID, } - vol, err := d.ec2.CreateVolume(req) if err != nil { - logrus.Warnf("Failed in CreateVolumeRequest :%v", err) - return api.BadVolumeID, err - } - v := &api.Volume{ - ID: api.VolumeID(*vol.VolumeId), - Locator: locator, - Ctime: time.Now(), - Spec: spec, - Source: source, - LastScan: time.Now(), - Format: "none", - State: api.VolumeAvailable, - Status: api.Up, + dlog.Warnf("Failed in CreateVolumeRequest :%v", err) + return "", err } - err = d.UpdateVol(v) - err = d.waitStatus(v.ID, ec2.VolumeStateAvailable) - return v.ID, err + volume := common.NewVolume( + *vol.VolumeId, + api.FSType_FS_TYPE_NONE, + locator, + source, + spec, + + ) + err = d.UpdateVol(volume) + if err != nil { + return "", err + } + err = d.waitStatus(volume.Id, ec2.VolumeStateAvailable) + return volume.Id, err } // merge volume properties from aws into volume. 
func (d *Driver) merge(v *api.Volume, aws *ec2.Volume) { - v.AttachedOn = api.MachineID("") - v.State = api.VolumeDetached + v.AttachedOn = "" + v.State = api.VolumeState_VOLUME_STATE_DETACHED v.DevicePath = "" switch *aws.State { case ec2.VolumeStateAvailable: - v.Status = api.Up + v.Status = api.VolumeStatus_VOLUME_STATUS_UP case ec2.VolumeStateCreating, ec2.VolumeStateDeleting: - v.State = api.VolumePending - v.Status = api.Down + v.State = api.VolumeState_VOLUME_STATE_PENDING + v.Status = api.VolumeStatus_VOLUME_STATUS_DOWN case ec2.VolumeStateDeleted: - v.State = api.VolumeDeleted - v.Status = api.Down + v.State = api.VolumeState_VOLUME_STATE_DELETED + v.Status = api.VolumeStatus_VOLUME_STATUS_DOWN case ec2.VolumeStateError: - v.State = api.VolumeError - v.Status = api.Down + v.State = api.VolumeState_VOLUME_STATE_ERROR + v.Status = api.VolumeStatus_VOLUME_STATUS_DOWN case ec2.VolumeStateInUse: - v.Status = api.Up + v.Status = api.VolumeStatus_VOLUME_STATUS_UP if aws.Attachments != nil && len(aws.Attachments) != 0 { if aws.Attachments[0].InstanceId != nil { - v.AttachedOn = api.MachineID(*aws.Attachments[0].InstanceId) + v.AttachedOn = *aws.Attachments[0].InstanceId } if aws.Attachments[0].State != nil { v.State = d.volumeState(aws.Attachments[0].State) @@ -305,9 +297,9 @@ func (d *Driver) merge(v *api.Volume, aws *ec2.Volume) { } } -func (d *Driver) waitStatus(volumeID api.VolumeID, desired string) error { +func (d *Driver) waitStatus(volumeID string, desired string) error { - id := string(volumeID) + id := volumeID request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&id}} actual := "" @@ -337,11 +329,11 @@ func (d *Driver) waitStatus(volumeID api.VolumeID, desired string) error { } func (d *Driver) waitAttachmentStatus( - volumeID api.VolumeID, + volumeID string, desired string, timeout time.Duration) error { - id := string(volumeID) + id := volumeID request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&id}} actual := "" interval := 2 * 
time.Second @@ -380,9 +372,9 @@ func (d *Driver) waitAttachmentStatus( return nil } -func (d *Driver) devicePath(volumeID api.VolumeID) (string, error) { +func (d *Driver) devicePath(volumeID string) (string, error) { - awsVolID := string(volumeID) + awsVolID := volumeID request := &ec2.DescribeVolumesInput{VolumeIds: []*string{&awsVolID}} awsVols, err := d.ec2.DescribeVolumes(request) @@ -390,7 +382,7 @@ func (d *Driver) devicePath(volumeID api.VolumeID) (string, error) { return "", err } if awsVols == nil || len(awsVols.Volumes) == 0 { - return "", fmt.Errorf("Failed to retrieve volume for ID %q", string(volumeID)) + return "", fmt.Errorf("Failed to retrieve volume for ID %q", volumeID) } aws := awsVols.Volumes[0] @@ -422,14 +414,14 @@ func (d *Driver) devicePath(volumeID api.VolumeID) (string, error) { return dev, nil } -func (d *Driver) Inspect(volumeIDs []api.VolumeID) ([]api.Volume, error) { +func (d *Driver) Inspect(volumeIDs []string) ([]*api.Volume, error) { vols, err := d.DefaultEnumerator.Inspect(volumeIDs) if err != nil { return nil, err } var ids []*string = make([]*string, len(vols)) for i, v := range vols { - id := string(v.ID) + id := v.Id ids[i] = &id } request := &ec2.DescribeVolumesInput{VolumeIds: ids} @@ -441,16 +433,16 @@ func (d *Driver) Inspect(volumeIDs []api.VolumeID) ([]api.Volume, error) { return nil, fmt.Errorf("AwsVols (%v) do not match recorded vols (%v)", awsVols, vols) } for i, v := range awsVols.Volumes { - if string(vols[i].ID) != *v.VolumeId { - d.merge(&vols[i], v) + if string(vols[i].Id) != *v.VolumeId { + d.merge(vols[i], v) } } return vols, nil } -func (d *Driver) Delete(volumeID api.VolumeID) error { +func (d *Driver) Delete(volumeID string) error { dryRun := false - id := string(volumeID) + id := volumeID req := &ec2.DeleteVolumeInput{ VolumeId: &id, DryRun: &dryRun, @@ -462,50 +454,50 @@ func (d *Driver) Delete(volumeID api.VolumeID) error { return nil } -func (d *Driver) Snapshot(volumeID api.VolumeID, readonly bool, 
locator api.VolumeLocator) (api.VolumeID, error) { +func (d *Driver) Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator) (string, error) { dryRun := false - vols, err := d.DefaultEnumerator.Inspect([]api.VolumeID{volumeID}) + vols, err := d.DefaultEnumerator.Inspect([]string{volumeID}) if err != nil { - return api.BadVolumeID, err + return "", err } if len(vols) != 1 { - return api.BadVolumeID, fmt.Errorf("Failed to inspect %v len %v", volumeID, len(vols)) + return "", fmt.Errorf("Failed to inspect %v len %v", volumeID, len(vols)) } - awsID := string(volumeID) + awsID := volumeID request := &ec2.CreateSnapshotInput{ VolumeId: &awsID, DryRun: &dryRun, } snap, err := d.ec2.CreateSnapshot(request) chaos.Now(koStrayCreate) - vols[0].ID = api.VolumeID(*snap.SnapshotId) + vols[0].Id = *snap.SnapshotId vols[0].Source = &api.Source{Parent: volumeID} vols[0].Locator = locator - vols[0].Ctime = time.Now() + vols[0].Ctime = prototime.Now() chaos.Now(koStrayCreate) - err = d.CreateVol(&vols[0]) + err = d.CreateVol(vols[0]) if err != nil { - return api.BadVolumeID, err + return "", err } - return vols[0].ID, nil + return vols[0].Id, nil } -func (d *Driver) Stats(volumeID api.VolumeID) (api.Stats, error) { - return api.Stats{}, volume.ErrNotSupported +func (d *Driver) Stats(volumeID string) (*api.Stats, error) { + return nil, volume.ErrNotSupported } -func (d *Driver) Alerts(volumeID api.VolumeID) (api.Alerts, error) { - return api.Alerts{}, volume.ErrNotSupported +func (d *Driver) Alerts(volumeID string) (*api.Alerts, error) { + return nil, volume.ErrNotSupported } -func (d *Driver) Attach(volumeID api.VolumeID) (path string, err error) { +func (d *Driver) Attach(volumeID string) (path string, err error) { dryRun := false device, err := d.Assign() if err != nil { return "", err } - awsVolID := string(volumeID) + awsVolID := volumeID req := &ec2.AttachVolumeInput{ DryRun: &dryRun, Device: &device, @@ -522,25 +514,25 @@ func (d *Driver) Attach(volumeID 
api.VolumeID) (path string, err error) { func (d *Driver) volumeState(ec2VolState *string) api.VolumeState { if ec2VolState == nil { - return api.VolumeDetached + return api.VolumeState_VOLUME_STATE_DETACHED } switch *ec2VolState { case ec2.VolumeAttachmentStateAttached: - return api.VolumeAttached + return api.VolumeState_VOLUME_STATE_ATTACHED case ec2.VolumeAttachmentStateDetached: - return api.VolumeDetached + return api.VolumeState_VOLUME_STATE_DETACHED case ec2.VolumeAttachmentStateAttaching, ec2.VolumeAttachmentStateDetaching: - return api.VolumePending + return api.VolumeState_VOLUME_STATE_PENDING default: - logrus.Warnf("Failed to translate EC2 volume status %v", ec2VolState) + dlog.Warnf("Failed to translate EC2 volume status %v", ec2VolState) } - return api.VolumeError + return api.VolumeState_VOLUME_STATE_ERROR } -func (d *Driver) Format(volumeID api.VolumeID) error { +func (d *Driver) Format(volumeID string) error { v, err := d.GetVol(volumeID) if err != nil { - return fmt.Errorf("Failed to locate volume %q", string(volumeID)) + return fmt.Errorf("Failed to locate volume %q", volumeID) } // XXX: determine mount state @@ -551,7 +543,7 @@ func (d *Driver) Format(volumeID api.VolumeID) error { cmd := "/sbin/mkfs." 
+ string(v.Spec.Format) o, err := exec.Command(cmd, devicePath).Output() if err != nil { - logrus.Warnf("Failed to run command %v %v: %v", cmd, devicePath, o) + dlog.Warnf("Failed to run command %v %v: %v", cmd, devicePath, o) return err } v.Format = v.Spec.Format @@ -559,9 +551,9 @@ func (d *Driver) Format(volumeID api.VolumeID) error { return err } -func (d *Driver) Detach(volumeID api.VolumeID) error { +func (d *Driver) Detach(volumeID string) error { force := false - awsVolID := string(volumeID) + awsVolID := volumeID req := &ec2.DetachVolumeInput{ InstanceId: &d.md.instance, VolumeId: &awsVolID, @@ -575,10 +567,10 @@ func (d *Driver) Detach(volumeID api.VolumeID) error { return err } -func (d *Driver) Mount(volumeID api.VolumeID, mountpath string) error { +func (d *Driver) Mount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { - return fmt.Errorf("Failed to locate volume %q", string(volumeID)) + return fmt.Errorf("Failed to locate volume %q", volumeID) } devicePath, err := d.devicePath(volumeID) if err != nil { @@ -591,17 +583,17 @@ func (d *Driver) Mount(volumeID api.VolumeID, mountpath string) error { return nil } -func (d *Driver) Unmount(volumeID api.VolumeID, mountpath string) error { +func (d *Driver) Unmount(volumeID string, mountpath string) error { // XXX: determine if valid mount path err := syscall.Unmount(mountpath, 0) return err } func (d *Driver) Shutdown() { - logrus.Printf("%s Shutting down", Name) + dlog.Printf("%s Shutting down", Name) } -func (d *Driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error { +func (d *Driver) Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error { return volume.ErrNotSupported } diff --git a/volume/drivers/aws/aws_test.go b/volume/drivers/aws/aws_test.go index 566b170c0..67971829f 100644 --- a/volume/drivers/aws/aws_test.go +++ b/volume/drivers/aws/aws_test.go @@ -4,6 +4,7 @@ import ( "testing" 
"github.com/aws/aws-sdk-go/aws/credentials" + "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/volume" "github.com/libopenstorage/openstorage/volume/drivers/test" ) @@ -21,6 +22,6 @@ func TestAll(t *testing.T) { t.Fatalf("Failed to initialize Volume Driver: %v", err) } ctx := test.NewContext(d) - ctx.Filesystem = "ext4" + ctx.Filesystem = api.FSType_FS_TYPE_EXT4 test.RunShort(t, ctx) } diff --git a/volume/drivers/btrfs/btrfs.go b/volume/drivers/btrfs/btrfs.go index 3af05342b..2286b0881 100644 --- a/volume/drivers/btrfs/btrfs.go +++ b/volume/drivers/btrfs/btrfs.go @@ -4,23 +4,24 @@ package btrfs import ( "fmt" - "path" + "path/filepath" "syscall" - "time" - "github.com/Sirupsen/logrus" + "go.pedge.io/proto/time" + "github.com/docker/docker/daemon/graphdriver" "github.com/docker/docker/daemon/graphdriver/btrfs" "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/pkg/chaos" "github.com/libopenstorage/openstorage/volume" + "github.com/libopenstorage/openstorage/volume/drivers/common" "github.com/pborman/uuid" "github.com/portworx/kvdb" ) const ( Name = "btrfs" - Type = api.File + Type = api.DriverType_DRIVER_TYPE_FILE RootParam = "home" Volumes = "volumes" ) @@ -43,17 +44,17 @@ func Init(params volume.DriverParams) (volume.VolumeDriver, error) { if !ok { return nil, fmt.Errorf("Root directory should be specified with key %q", RootParam) } - home := path.Join(root, Volumes) + home := filepath.Join(root, "volumes") d, err := btrfs.Init(home, nil, nil, nil) if err != nil { return nil, err } - s := volume.NewDefaultEnumerator(Name, kvdb.Instance()) return &driver{ btrfs: d, root: root, IoNotSupported: &volume.IoNotSupported{}, - DefaultEnumerator: s}, nil + DefaultEnumerator: volume.NewDefaultEnumerator(Name, kvdb.Instance()), + }, nil } func (d *driver) String() string { @@ -70,79 +71,61 @@ func (d 
*driver) Type() api.DriverType { } // Create a new subvolume. The volume spec is not taken into account. -func (d *driver) Create(locator api.VolumeLocator, +func (d *driver) Create( + locator *api.VolumeLocator, source *api.Source, - spec *api.VolumeSpec) (api.VolumeID, error) { - - if spec.Format != "btrfs" && spec.Format != "" { - return api.BadVolumeID, fmt.Errorf("Filesystem format (%v) must be %v", - spec.Format, "btrfs") - } - - volumeID := uuid.New() - - v := &api.Volume{ - ID: api.VolumeID(volumeID), - Locator: locator, - Ctime: time.Now(), - Spec: spec, - Source: source, - LastScan: time.Now(), - Format: "btrfs", - State: api.VolumeAvailable, - Status: api.Up, - } - err := d.CreateVol(v) - if err != nil { - return api.BadVolumeID, err - } - err = d.btrfs.Create(volumeID, "", "") - if err != nil { - return api.BadVolumeID, err - } - v.DevicePath, err = d.btrfs.Get(volumeID, "") + spec *api.VolumeSpec, +) (string, error) { + if spec.Format != api.FSType_FS_TYPE_BTRFS && spec.Format != api.FSType_FS_TYPE_NONE { + return "", fmt.Errorf("Filesystem format (%v) must be %v", spec.Format.SimpleString(), api.FSType_FS_TYPE_BTRFS.SimpleString()) + } + volume := common.NewVolume( + uuid.New(), + api.FSType_FS_TYPE_BTRFS, + locator, + source, + spec, + + ) + if err := d.CreateVol(volume); err != nil { + return "", err + } + if err := d.btrfs.Create(volume.Id, "", ""); err != nil { + return "", err + } + devicePath, err := d.btrfs.Get(volume.Id, "") if err != nil { - return v.ID, err + return volume.Id, err } - err = d.UpdateVol(v) - return v.ID, err + volume.DevicePath = devicePath + err = d.UpdateVol(volume) + return volume.Id, err } // Delete subvolume -func (d *driver) Delete(volumeID api.VolumeID) error { - err := d.DeleteVol(volumeID) - if err != nil { - logrus.Println(err) +func (d *driver) Delete(volumeID string) error { + if err := d.DeleteVol(volumeID); err != nil { return err } - chaos.Now(koStrayDelete) - if err == nil { - err = 
d.btrfs.Remove(string(volumeID)) - } - return err + return d.btrfs.Remove(volumeID) } // Mount bind mount btrfs subvolume -func (d *driver) Mount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Mount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { - logrus.Println(err) return err } - err = syscall.Mount(v.DevicePath, mountpath, string(v.Format), syscall.MS_BIND, "") - if err != nil { + if err := syscall.Mount(v.DevicePath, mountpath, v.Format.SimpleString(), syscall.MS_BIND, ""); err != nil { return fmt.Errorf("Failed to mount %v at %v: %v", v.DevicePath, mountpath, err) } - v.AttachPath = mountpath - err = d.UpdateVol(v) - - return err + return d.UpdateVol(v) } // Unmount btrfs subvolume -func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Unmount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { return err @@ -150,16 +133,14 @@ func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { if v.AttachPath == "" { return fmt.Errorf("Device %v not mounted", volumeID) } - err = syscall.Unmount(v.AttachPath, 0) - if err != nil { + if err := syscall.Unmount(v.AttachPath, 0); err != nil { return err } v.AttachPath = "" - err = d.UpdateVol(v) - return err + return d.UpdateVol(v) } -func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error { +func (d *driver) Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error { if spec != nil { return volume.ErrNotSupported } @@ -168,47 +149,45 @@ func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *ap return err } if locator != nil { - v.Locator = *locator + v.Locator = locator } - err = d.UpdateVol(v) - return err + return d.UpdateVol(v) } // Snapshot create new subvolume from volume -func (d *driver) Snapshot(volumeID api.VolumeID, readonly bool, locator api.VolumeLocator) (api.VolumeID, error) { - 
vols, err := d.Inspect([]api.VolumeID{volumeID}) +func (d *driver) Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator) (string, error) { + vols, err := d.Inspect([]string{volumeID}) if err != nil { - return api.BadVolumeID, err + return "", err } if len(vols) != 1 { - return api.BadVolumeID, fmt.Errorf("Failed to inspect %v len %v", volumeID, len(vols)) + return "", fmt.Errorf("Failed to inspect %v len %v", volumeID, len(vols)) } snapID := uuid.New() - vols[0].ID = api.VolumeID(snapID) + vols[0].Id = snapID vols[0].Source = &api.Source{Parent: volumeID} vols[0].Locator = locator - vols[0].Ctime = time.Now() + vols[0].Ctime = prototime.Now() - err = d.CreateVol(&vols[0]) - if err != nil { - return api.BadVolumeID, err + if err := d.CreateVol(vols[0]); err != nil { + return "", err } chaos.Now(koStrayCreate) - err = d.btrfs.Create(snapID, string(volumeID), "") + err = d.btrfs.Create(snapID, volumeID, "") if err != nil { - return api.BadVolumeID, err + return "", err } - return vols[0].ID, nil + return vols[0].Id, nil } // Stats for specified volume. -func (d *driver) Stats(volumeID api.VolumeID) (api.Stats, error) { - return api.Stats{}, nil +func (d *driver) Stats(volumeID string) (*api.Stats, error) { + return nil, nil } // Alerts on this volume. -func (d *driver) Alerts(volumeID api.VolumeID) (api.Alerts, error) { - return api.Alerts{}, nil +func (d *driver) Alerts(volumeID string) (*api.Alerts, error) { + return nil, nil } // Shutdown and cleanup. 
diff --git a/volume/drivers/btrfs/btrfs_test.go b/volume/drivers/btrfs/btrfs_test.go index d458933cf..3110f0d16 100644 --- a/volume/drivers/btrfs/btrfs_test.go +++ b/volume/drivers/btrfs/btrfs_test.go @@ -7,6 +7,7 @@ import ( "os/exec" "testing" + "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/volume" "github.com/libopenstorage/openstorage/volume/drivers/test" ) @@ -54,6 +55,6 @@ func TestAll(t *testing.T) { t.Fatalf("failed to initialize VolumeDriver: %v", err) } ctx := test.NewContext(volumeDriver) - ctx.Filesystem = "btrfs" + ctx.Filesystem = api.FSType_FS_TYPE_BTRFS test.Run(t, ctx) } diff --git a/volume/drivers/btrfs/unsupported.go b/volume/drivers/btrfs/unsupported.go index 543afdaea..e780e7dd1 100644 --- a/volume/drivers/btrfs/unsupported.go +++ b/volume/drivers/btrfs/unsupported.go @@ -10,10 +10,9 @@ import ( ) const ( - Name = "btrfs" - Type = api.File + Name = "btrfs" + Type = api.DriverType_DRIVER_TYPE_FILE RootParam = "home" - Volumes = "volumes" ) var ( diff --git a/volume/drivers/buse/buse.go b/volume/drivers/buse/buse.go index 2e41a8489..d32eb35ab 100644 --- a/volume/drivers/buse/buse.go +++ b/volume/drivers/buse/buse.go @@ -8,23 +8,28 @@ import ( "path" "strings" "syscall" - "time" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/cluster" "github.com/libopenstorage/openstorage/volume" + "github.com/libopenstorage/openstorage/volume/drivers/common" "github.com/pborman/uuid" "github.com/portworx/kvdb" ) const ( Name = "buse" - Type = api.Block + Type = api.DriverType_DRIVER_TYPE_BLOCK BuseDBKey = "OpenStorageBuseKey" BuseMountPath = "/var/lib/openstorage/buse/" ) +func init() { + volume.Register(Name, Init) +} + // Implements the open storage volume interface. 
type driver struct { *volume.IoNotSupported @@ -79,37 +84,34 @@ func Init(params volume.DriverParams) (volume.VolumeDriver, error) { IoNotSupported: &volume.IoNotSupported{}, DefaultEnumerator: volume.NewDefaultEnumerator(Name, kvdb.Instance()), } - inst.buseDevices = make(map[string]*buseDev) - - err := os.MkdirAll(BuseMountPath, 0744) - if err != nil { + if err := os.MkdirAll(BuseMountPath, 0744); err != nil { return nil, err } - volumeInfo, err := inst.DefaultEnumerator.Enumerate( - api.VolumeLocator{}, - nil) + &api.VolumeLocator{}, + nil, + ) if err == nil { for _, info := range volumeInfo { - if info.Status == "" { - info.Status = api.Up - inst.UpdateVol(&info) + if info.Status == api.VolumeStatus_VOLUME_STATUS_NONE { + info.Status = api.VolumeStatus_VOLUME_STATUS_UP + inst.UpdateVol(info) } } } else { - logrus.Println("Could not enumerate Volumes, ", err) + dlog.Println("Could not enumerate Volumes, ", err) } c, err := cluster.Inst() if err != nil { - logrus.Println("BUSE initializing in single node mode") + dlog.Println("BUSE initializing in single node mode") } else { - logrus.Println("BUSE initializing in clustered mode") + dlog.Println("BUSE initializing in clustered mode") c.AddEventListener(inst) } - logrus.Println("BUSE initialized and driver mounted at: ", BuseMountPath) + dlog.Println("BUSE initialized and driver mounted at: ", BuseMountPath) return inst, nil } @@ -130,89 +132,81 @@ func (d *driver) Status() [][2]string { return [][2]string{} } -func (d *driver) Create(locator api.VolumeLocator, source *api.Source, spec *api.VolumeSpec) (api.VolumeID, error) { +func (d *driver) Create(locator *api.VolumeLocator, source *api.Source, spec *api.VolumeSpec) (string, error) { volumeID := uuid.New() volumeID = strings.TrimSuffix(volumeID, "\n") - if spec.Size == 0 { - return api.BadVolumeID, fmt.Errorf("Volume size cannot be zero", "buse") + return "", fmt.Errorf("Volume size cannot be zero", "buse") } - - if spec.Format == "" { - return api.BadVolumeID, 
fmt.Errorf("Missing volume format", "buse") + if spec.Format == api.FSType_FS_TYPE_NONE { + return "", fmt.Errorf("Missing volume format", "buse") } - // Create a file on the local buse path with this UUID. - buseFile := path.Join(BuseMountPath, string(volumeID)) + buseFile := path.Join(BuseMountPath, volumeID) f, err := os.Create(buseFile) if err != nil { - logrus.Println(err) - return api.BadVolumeID, err + dlog.Println(err) + return "", err } - err = f.Truncate(int64(spec.Size)) - if err != nil { - logrus.Println(err) - return api.BadVolumeID, err + if err := f.Truncate(int64(spec.Size)); err != nil { + dlog.Println(err) + return "", err } bd := &buseDev{ file: buseFile, - f: f} - + f: f, + } nbd := Create(bd, int64(spec.Size)) bd.nbd = nbd - logrus.Infof("Connecting to NBD...") + dlog.Infof("Connecting to NBD...") dev, err := bd.nbd.Connect() if err != nil { - logrus.Println(err) - return api.BadVolumeID, err + dlog.Println(err) + return "", err } - logrus.Infof("Formatting %s with %v", dev, spec.Format) - cmd := "/sbin/mkfs." + string(spec.Format) + dlog.Infof("Formatting %s with %v", dev, spec.Format) + cmd := "/sbin/mkfs." 
+ spec.Format.SimpleString() o, err := exec.Command(cmd, dev).Output() if err != nil { - logrus.Warnf("Failed to run command %v %v: %v", cmd, dev, o) - return api.BadVolumeID, err + dlog.Warnf("Failed to run command %v %v: %v", cmd, dev, o) + return "", err } - logrus.Infof("BUSE mapped NBD device %s (size=%v) to block file %s", dev, spec.Size, buseFile) - - v := &api.Volume{ - ID: api.VolumeID(volumeID), - Source: source, - Locator: locator, - Ctime: time.Now(), - Spec: spec, - LastScan: time.Now(), - Format: spec.Format, - State: api.VolumeAvailable, - Status: api.Up, - DevicePath: dev, - } + dlog.Infof("BUSE mapped NBD device %s (size=%v) to block file %s", dev, spec.Size, buseFile) + + v := common.NewVolume( + volumeID, + spec.Format, + locator, + source, + spec, + ) + v.DevicePath = dev d.buseDevices[dev] = bd err = d.CreateVol(v) if err != nil { - return api.BadVolumeID, err + return "", err } - return v.ID, err + return v.Id, err } -func (d *driver) Delete(volumeID api.VolumeID) error { +func (d *driver) Delete(volumeID string) error { v, err := d.GetVol(volumeID) if err != nil { - logrus.Println(err) + dlog.Println(err) return err } bd, ok := d.buseDevices[v.DevicePath] if !ok { err = fmt.Errorf("Cannot locate a BUSE device for %s", v.DevicePath) - logrus.Println(err) + dlog.Println(err) return err } @@ -221,37 +215,37 @@ func (d *driver) Delete(volumeID api.VolumeID) error { bd.f.Close() bd.nbd.Disconnect() - logrus.Infof("BUSE deleted volume %v at NBD device %s", volumeID, v.DevicePath) + dlog.Infof("BUSE deleted volume %v at NBD device %s", volumeID, v.DevicePath) - err = d.DeleteVol(volumeID) - if err != nil { - logrus.Println(err) + if err := d.DeleteVol(volumeID); err != nil { + dlog.Println(err) return err } return nil } -func (d *driver) Mount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Mount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { - return fmt.Errorf("Failed to locate volume 
%q", string(volumeID)) + return fmt.Errorf("Failed to locate volume %q", volumeID) } - err = syscall.Mount(v.DevicePath, mountpath, string(v.Spec.Format), 0, "") - if err != nil { - logrus.Errorf("Mounting %s on %s failed because of %v", v.DevicePath, mountpath, err) + if err := syscall.Mount(v.DevicePath, mountpath, v.Spec.Format.SimpleString(), 0, ""); err != nil { + // TODO(pedge): same string for log message and error? + dlog.Errorf("Mounting %s on %s failed because of %v", v.DevicePath, mountpath, err) return fmt.Errorf("Failed to mount %v at %v: %v", v.DevicePath, mountpath, err) } - logrus.Infof("BUSE mounted NBD device %s at %s", v.DevicePath, mountpath) + dlog.Infof("BUSE mounted NBD device %s at %s", v.DevicePath, mountpath) v.AttachPath = mountpath + // TODO(pedge): why ignoring the error? err = d.UpdateVol(v) return nil } -func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Unmount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { return err @@ -259,40 +253,38 @@ func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { if v.AttachPath == "" { return fmt.Errorf("Device %v not mounted", volumeID) } - err = syscall.Unmount(v.AttachPath, 0) - if err != nil { + if err := syscall.Unmount(v.AttachPath, 0); err != nil { return err } v.AttachPath = "" - err = d.UpdateVol(v) - return err + return d.UpdateVol(v) } -func (d *driver) Snapshot(volumeID api.VolumeID, readonly bool, locator api.VolumeLocator) (api.VolumeID, error) { - volIDs := make([]api.VolumeID, 1) +func (d *driver) Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator) (string, error) { + volIDs := make([]string, 1) volIDs[0] = volumeID vols, err := d.Inspect(volIDs) if err != nil { - return api.BadVolumeID, nil + return "", nil } source := &api.Source{Parent: volumeID} newVolumeID, err := d.Create(locator, source, vols[0].Spec) if err != nil { - return api.BadVolumeID, nil + return "", nil 
} // BUSE does not support snapshots, so just copy the block files. - err = copyFile(BuseMountPath+string(volumeID), BuseMountPath+string(newVolumeID)) + err = copyFile(BuseMountPath+volumeID, BuseMountPath+newVolumeID) if err != nil { d.Delete(newVolumeID) - return api.BadVolumeID, nil + return "", nil } return newVolumeID, nil } -func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error { +func (d *driver) Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error { if spec != nil { return volume.ErrNotSupported } @@ -301,32 +293,31 @@ func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *ap return err } if locator != nil { - v.Locator = *locator + v.Locator = locator } - err = d.UpdateVol(v) - return err + return d.UpdateVol(v) } -func (d *driver) Attach(volumeID api.VolumeID) (string, error) { +func (d *driver) Attach(volumeID string) (string, error) { // Nothing to do on attach. - return path.Join(BuseMountPath, string(volumeID)), nil + return path.Join(BuseMountPath, volumeID), nil } -func (d *driver) Detach(volumeID api.VolumeID) error { +func (d *driver) Detach(volumeID string) error { // Nothing to do on detach. 
return nil } -func (d *driver) Stats(volumeID api.VolumeID) (api.Stats, error) { - return api.Stats{}, volume.ErrNotSupported +func (d *driver) Stats(volumeID string) (*api.Stats, error) { + return nil, volume.ErrNotSupported } -func (d *driver) Alerts(volumeID api.VolumeID) (api.Alerts, error) { - return api.Alerts{}, volume.ErrNotSupported +func (d *driver) Alerts(volumeID string) (*api.Alerts, error) { + return nil, volume.ErrNotSupported } func (d *driver) Shutdown() { - logrus.Printf("%s Shutting down", Name) + dlog.Printf("%s Shutting down", Name) syscall.Unmount(BuseMountPath, 0) } diff --git a/volume/drivers/buse/nbd.go b/volume/drivers/buse/nbd.go index 6f697ac9c..99b9b28f5 100644 --- a/volume/drivers/buse/nbd.go +++ b/volume/drivers/buse/nbd.go @@ -13,7 +13,7 @@ import ( "sync" "syscall" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" ) const ( @@ -140,7 +140,7 @@ func (nbd *NBD) Connect() (dev string, err error) { continue // Busy. } - logrus.Infof("Attempting to open device %v", dev) + dlog.Infof("Attempting to open device %v", dev) if nbd.deviceFile, err = os.Open(dev); err == nil { // Possible candidate. ioctl(nbd.deviceFile.Fd(), BLKROSET, 0) @@ -190,7 +190,7 @@ func (nbd *NBD) connect() { // NBD_CONNECT does not return until disconnect. ioctl(nbd.deviceFile.Fd(), NBD_CONNECT, 0) - logrus.Infof("Closing device file %s", nbd.devicePath) + dlog.Infof("Closing device file %s", nbd.devicePath) } // Handle block requests. 
@@ -201,12 +201,12 @@ func (nbd *NBD) handle() { for { bytes, err := syscall.Read(nbd.socket, buf[0:28]) if nbd.deviceFile == nil { - logrus.Infof("Disconnecting device %s", nbd.devicePath) + dlog.Infof("Disconnecting device %s", nbd.devicePath) return } if bytes < 0 || err != nil { - logrus.Errorf("Error reading from device %s", nbd.devicePath) + dlog.Errorf("Error reading from device %s", nbd.devicePath) nbd.Disconnect() return } @@ -238,7 +238,7 @@ func (nbd *NBD) handle() { binary.BigEndian.PutUint32(buf[4:8], 0) syscall.Write(nbd.socket, buf[0:16]) case NBD_CMD_DISC: - logrus.Infof("Disconnecting device %s", nbd.devicePath) + dlog.Infof("Disconnecting device %s", nbd.devicePath) nbd.Disconnect() return case NBD_CMD_FLUSH: @@ -248,12 +248,12 @@ func (nbd *NBD) handle() { binary.BigEndian.PutUint32(buf[4:8], 1) syscall.Write(nbd.socket, buf[0:16]) default: - logrus.Errorf("Unknown command recieved on device %s", nbd.devicePath) + dlog.Errorf("Unknown command recieved on device %s", nbd.devicePath) nbd.Disconnect() return } default: - logrus.Errorf("Invalid packet command recieved on device %s", nbd.devicePath) + dlog.Errorf("Invalid packet command recieved on device %s", nbd.devicePath) nbd.Disconnect() return } diff --git a/volume/drivers/common/common.go b/volume/drivers/common/common.go new file mode 100644 index 000000000..12d7c293e --- /dev/null +++ b/volume/drivers/common/common.go @@ -0,0 +1,28 @@ +package common + +import ( + "go.pedge.io/proto/time" + + "github.com/libopenstorage/openstorage/api" +) + +// NewVolume returns a new api.Volume for a driver Create call. 
+func NewVolume( + volumeID string, + fsType api.FSType, + volumeLocator *api.VolumeLocator, + source *api.Source, + volumeSpec *api.VolumeSpec, +) *api.Volume { + return &api.Volume{ + Id: volumeID, + Locator: volumeLocator, + Ctime: prototime.Now(), + Spec: volumeSpec, + Source: source, + LastScan: prototime.Now(), + Format: fsType, + State: api.VolumeState_VOLUME_STATE_AVAILABLE, + Status: api.VolumeStatus_VOLUME_STATUS_UP, + } +} diff --git a/volume/drivers/coprhd/coprhd.go b/volume/drivers/coprhd/coprhd.go index 4389d9811..f3f232b6f 100644 --- a/volume/drivers/coprhd/coprhd.go +++ b/volume/drivers/coprhd/coprhd.go @@ -6,19 +6,17 @@ import ( "net/http" "net/url" + "go.pedge.io/dlog" - "github.com/Sirupsen/logrus" "github.com/portworx/kvdb" - "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/volume" - "gopkg.in/jmcvetta/napping.v3" ) const ( Name = "coprhd" - Type = api.Block + Type = api.DriverType_DRIVER_TYPE_BLOCK // LoginUri path to create a authentication token loginUri = "login.json" @@ -62,8 +60,8 @@ type ( CreateVolumeReply struct { Task []struct { Resource struct { - Name string `json:"name"` - Id api.VolumeID `json:"id"` + Name string `json:"name"` + Id string `json:"id"` } `json:"resource"` } `json:"task"` } @@ -136,15 +134,15 @@ func (d *driver) Type() api.DriverType { } func (d *driver) Create( - locator api.VolumeLocator, + locator *api.VolumeLocator, source *api.Source, - spec *api.VolumeSpec) (api.VolumeID, error) { + spec *api.VolumeSpec) (string, error) { s, err := d.getAuthSession() if err != nil { - logrus.Errorf("Failed to create session: %s", err.Error()) - return api.BadVolumeID, err + dlog.Errorf("Failed to create session: %s", err.Error()) + return "", err } e := ApiError{} @@ -169,50 +167,56 @@ func (d *driver) Create( if resp.Status() != http.StatusAccepted { - return api.BadVolumeID, fmt.Errorf("Failed to create volume: %s", resp.Status()) + return "", fmt.Errorf("Failed 
to create volume: %s", resp.Status()) } return res.Task[0].Resource.Id, err } -func (d *driver) Delete(volumeID api.VolumeID) error { +func (d *driver) Delete(volumeID string) error { return nil } -func (d *driver) Stats(volumeID api.VolumeID) (api.Stats, error) { - return api.Stats{}, volume.ErrNotSupported +func (d *driver) Stats(volumeID string) (*api.Stats, error) { + return nil, volume.ErrNotSupported } -func (d *driver) Alerts(volumeID api.VolumeID) (api.Alerts, error) { - return api.Alerts{}, volume.ErrNotSupported +func (d *driver) Alerts(volumeID string) (*api.Alerts, error) { + return nil, volume.ErrNotSupported } -func (d *driver) Attach(volumeID api.VolumeID) (path string, err error) { +func (d *driver) Attach(volumeID string) (path string, err error) { return "", nil } -func (d *driver) Detach(volumeID api.VolumeID) error { +func (d *driver) Detach(volumeID string) error { return nil } -func (d *driver) Mount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Mount(volumeID string, mountpath string) error { return nil } -func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Unmount(volumeID string, mountpath string) error { return nil } -func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error { +func (d *driver) Set( + volumeID string, + locator *api.VolumeLocator, + spec *api.VolumeSpec) error { return volume.ErrNotSupported } func (d *driver) Shutdown() { - logrus.Infof("%s Shutting down", Name) + dlog.Infof("%s Shutting down", Name) } -func (d *driver) Snapshot(volumeID api.VolumeID, readonly bool, locator api.VolumeLocator) (api.VolumeID, error) { +func (d *driver) Snapshot( + volumeID string, + readonly bool, + locator *api.VolumeLocator) (string, error) { return "", nil } diff --git a/volume/drivers/fuse/volume_driver.go b/volume/drivers/fuse/volume_driver.go index dd5b714b7..35a00f210 100644 --- a/volume/drivers/fuse/volume_driver.go +++ 
b/volume/drivers/fuse/volume_driver.go @@ -5,13 +5,13 @@ import ( "os" "path/filepath" "strings" - "time" "bazil.org/fuse" "bazil.org/fuse/fs" "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/volume" + "github.com/libopenstorage/openstorage/volume/drivers/common" "github.com/pborman/uuid" "github.com/portworx/kvdb" ) @@ -50,40 +50,37 @@ func (v *volumeDriver) String() string { } func (v *volumeDriver) Type() api.DriverType { - return api.File + return api.DriverType_DRIVER_TYPE_FILE } func (v *volumeDriver) Create( - volumeLocator api.VolumeLocator, + volumeLocator *api.VolumeLocator, source *api.Source, spec *api.VolumeSpec, -) (api.VolumeID, error) { +) (string, error) { volumeID := strings.TrimSpace(string(uuid.New())) dirPath := filepath.Join(v.baseDirPath, volumeID) if err := os.MkdirAll(dirPath, 0777); err != nil { - return api.BadVolumeID, err - } - volume := &api.Volume{ - ID: api.VolumeID(volumeID), - Locator: volumeLocator, - Ctime: time.Now(), - Spec: spec, - LastScan: time.Now(), - Format: "fuse", - State: api.VolumeAvailable, - Status: api.Up, - DevicePath: dirPath, + return "", err } + volume := common.NewVolume( + volumeID, + api.FSType_FS_TYPE_FUSE, + volumeLocator, + source, + spec, + ) + volume.DevicePath = dirPath if err := v.CreateVol(volume); err != nil { - return api.BadVolumeID, err + return "", err } if err := v.UpdateVol(volume); err != nil { - return api.BadVolumeID, err + return "", err } - return volume.ID, nil + return volume.Id, nil } -func (v *volumeDriver) Delete(volumeID api.VolumeID) error { +func (v *volumeDriver) Delete(volumeID string) error { if _, err := v.GetVol(volumeID); err != nil { return err } @@ -93,7 +90,7 @@ func (v *volumeDriver) Delete(volumeID api.VolumeID) error { return v.DeleteVol(volumeID) } -func (v *volumeDriver) Mount(volumeID api.VolumeID, mountpath string) error { +func (v *volumeDriver) Mount(volumeID string, mountpath string) error 
{ volume, err := v.GetVol(volumeID) if err != nil { return err @@ -119,7 +116,7 @@ func (v *volumeDriver) Mount(volumeID api.VolumeID, mountpath string) error { return conn.MountError } -func (v *volumeDriver) Unmount(volumeID api.VolumeID, mountpath string) error { +func (v *volumeDriver) Unmount(volumeID string, mountpath string) error { volume, err := v.GetVol(volumeID) if err != nil { return err @@ -134,22 +131,21 @@ func (v *volumeDriver) Unmount(volumeID api.VolumeID, mountpath string) error { return v.UpdateVol(volume) } -func (v *volumeDriver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error { +func (v *volumeDriver) Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error { return volume.ErrNotSupported } -func (v *volumeDriver) Stats(volumeID api.VolumeID) (api.Stats, error) { - return api.Stats{}, volume.ErrNotSupported +func (v *volumeDriver) Stats(volumeID string) (*api.Stats, error) { + return nil, volume.ErrNotSupported } -func (v *volumeDriver) Alerts(volumeID api.VolumeID) (api.Alerts, error) { - return api.Alerts{}, volume.ErrNotSupported +func (v *volumeDriver) Alerts(volumeID string) (*api.Alerts, error) { + return nil, volume.ErrNotSupported } func (v *volumeDriver) Status() [][2]string { return [][2]string{} } -func (v *volumeDriver) Shutdown() { -} +func (v *volumeDriver) Shutdown() {} diff --git a/volume/drivers/nfs/nfs.go b/volume/drivers/nfs/nfs.go index 4fc574674..59fa39c45 100644 --- a/volume/drivers/nfs/nfs.go +++ b/volume/drivers/nfs/nfs.go @@ -8,26 +8,31 @@ import ( "path" "strings" "syscall" - "time" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/config" "github.com/libopenstorage/openstorage/pkg/mount" "github.com/libopenstorage/openstorage/pkg/seed" "github.com/libopenstorage/openstorage/volume" + 
"github.com/libopenstorage/openstorage/volume/drivers/common" "github.com/pborman/uuid" "github.com/portworx/kvdb" ) const ( Name = "nfs" - Type = api.File + Type = api.DriverType_DRIVER_TYPE_FILE NfsDBKey = "OpenStorageNFSKey" nfsMountPath = "/var/lib/openstorage/nfs/" nfsBlockFile = ".blockdevice" ) +func init() { + volume.Register(Name, Init) +} + // Implements the open storage volume interface. type driver struct { *volume.IoNotSupported @@ -37,95 +42,23 @@ type driver struct { mounter mount.Manager } -func copyFile(source string, dest string) (err error) { - sourcefile, err := os.Open(source) - if err != nil { - return err - } - - defer sourcefile.Close() - - destfile, err := os.Create(dest) - if err != nil { - return err - } - - defer destfile.Close() - - _, err = io.Copy(destfile, sourcefile) - if err == nil { - sourceinfo, err := os.Stat(source) - if err != nil { - err = os.Chmod(dest, sourceinfo.Mode()) - } - - } - - return -} - -func copyDir(source string, dest string) (err error) { - // get properties of source dir - sourceinfo, err := os.Stat(source) - if err != nil { - return err - } - - // create dest dir - - err = os.MkdirAll(dest, sourceinfo.Mode()) - if err != nil { - return err - } - - directory, _ := os.Open(source) - - objects, err := directory.Readdir(-1) - - for _, obj := range objects { - - sourcefilepointer := source + "/" + obj.Name() - - destinationfilepointer := dest + "/" + obj.Name() - - if obj.IsDir() { - // create sub-directories - recursively - err = copyDir(sourcefilepointer, destinationfilepointer) - if err != nil { - fmt.Println(err) - } - } else { - // perform copy - err = copyFile(sourcefilepointer, destinationfilepointer) - if err != nil { - fmt.Println(err) - } - } - - } - return -} - func Init(params volume.DriverParams) (volume.VolumeDriver, error) { path, ok := params["path"] if !ok { return nil, errors.New("No NFS path provided") } - server, ok := params["server"] if !ok { - logrus.Printf("No NFS 
server provided, will attempt to bind mount %s", path) + dlog.Printf("No NFS server provided, will attempt to bind mount %s", path) } else { - logrus.Printf("NFS driver initializing with %s:%s ", server, path) + dlog.Printf("NFS driver initializing with %s:%s ", server, path) } - // Create a mount manager for this NFS server. Blank sever is OK. mounter, err := mount.New(mount.NFSMount, server) if err != nil { - logrus.Warnf("Failed to create mount manager for server: %v (%v)", server, err) + dlog.Warnf("Failed to create mount manager for server: %v (%v)", server, err) return nil, err } - inst := &driver{ IoNotSupported: &volume.IoNotSupported{}, DefaultEnumerator: volume.NewDefaultEnumerator(Name, kvdb.Instance()), @@ -133,17 +66,13 @@ func Init(params volume.DriverParams) (volume.VolumeDriver, error) { nfsPath: path, mounter: mounter, } - - err = os.MkdirAll(nfsMountPath, 0744) - if err != nil { + if err := os.MkdirAll(nfsMountPath, 0744); err != nil { return nil, err } - src := inst.nfsPath if server != "" { src = ":" + inst.nfsPath } - // If src is already mounted at dest, leave it be. 
mountExists, err := mounter.Exists(src, nfsMountPath) if !mountExists { @@ -155,24 +84,23 @@ func Init(params volume.DriverParams) (volume.VolumeDriver, error) { err = syscall.Mount(src, nfsMountPath, "", syscall.MS_BIND, "") } if err != nil { - logrus.Printf("Unable to mount %s:%s at %s (%+v)", inst.nfsServer, inst.nfsPath, nfsMountPath, err) + dlog.Printf("Unable to mount %s:%s at %s (%+v)", inst.nfsServer, inst.nfsPath, nfsMountPath, err) return nil, err } } - - volumeInfo, err := inst.DefaultEnumerator.Enumerate(api.VolumeLocator{}, nil) + volumeInfo, err := inst.DefaultEnumerator.Enumerate(&api.VolumeLocator{}, nil) if err == nil { for _, info := range volumeInfo { - if info.Status == "" { - info.Status = api.Up - inst.UpdateVol(&info) + if info.Status == api.VolumeStatus_VOLUME_STATUS_NONE { + info.Status = api.VolumeStatus_VOLUME_STATUS_UP + inst.UpdateVol(info) } } } else { - logrus.Println("Could not enumerate Volumes, ", err) + dlog.Println("Could not enumerate Volumes, ", err) } - logrus.Println("NFS initialized and driver mounted at: ", nfsMountPath) + dlog.Println("NFS initialized and driver mounted at: ", nfsMountPath) return inst, nil } @@ -193,7 +121,11 @@ func (d *driver) Status() [][2]string { // These functions below implement the volume driver interface. 
// -func (d *driver) Create(locator api.VolumeLocator, source *api.Source, spec *api.VolumeSpec) (api.VolumeID, error) { +func (d *driver) Create( + locator *api.VolumeLocator, + source *api.Source, + spec *api.VolumeSpec) (string, error) { + volumeID := uuid.New() volumeID = strings.TrimSuffix(volumeID, "\n") @@ -201,63 +133,57 @@ func (d *driver) Create(locator api.VolumeLocator, source *api.Source, spec *api volPath := path.Join(nfsMountPath, volumeID) err := os.MkdirAll(volPath, 0744) if err != nil { - logrus.Println(err) - return api.BadVolumeID, err + dlog.Println(err) + return "", err } if source != nil { if len(source.Seed) != 0 { seed, err := seed.New(source.Seed, spec.ConfigLabels) if err != nil { - logrus.Warnf("Failed to initailize seed from %q : %v", + dlog.Warnf("Failed to initialize seed from %q : %v", source.Seed, err) - return api.BadVolumeID, err + return "", err } err = seed.Load(path.Join(volPath, config.DataDir)) if err != nil { - logrus.Warnf("Failed to seed from %q to %q: %v", + dlog.Warnf("Failed to seed from %q to %q: %v", source.Seed, nfsMountPath, err) - return api.BadVolumeID, err + return "", err } } } - f, err := os.Create(path.Join(nfsMountPath, string(volumeID)+nfsBlockFile)) + f, err := os.Create(path.Join(nfsMountPath, volumeID+nfsBlockFile)) if err != nil { - logrus.Println(err) - return api.BadVolumeID, err + dlog.Println(err) + return "", err } defer f.Close() - err = f.Truncate(int64(spec.Size)) - if err != nil { - logrus.Println(err) - return api.BadVolumeID, err + if err := f.Truncate(int64(spec.Size)); err != nil { + dlog.Println(err) + return "", err } - v := &api.Volume{ - ID: api.VolumeID(volumeID), - Source: source, - Locator: locator, - Ctime: time.Now(), - Spec: spec, - LastScan: time.Now(), - Format: "nfs", - State: api.VolumeAvailable, - Status: api.Up, - DevicePath: path.Join(nfsMountPath, string(volumeID)+nfsBlockFile), - } + v := common.NewVolume( + volumeID, + api.FSType_FS_TYPE_NFS, + locator, + source, + spec,
+ ) + v.DevicePath = path.Join(nfsMountPath, volumeID+nfsBlockFile) - err = d.CreateVol(v) - if err != nil { - return api.BadVolumeID, err + if err := d.CreateVol(v); err != nil { + return "", err } - return v.ID, err + return v.Id, err } -func (d *driver) Delete(volumeID api.VolumeID) error { +func (d *driver) Delete(volumeID string) error { v, err := d.GetVol(volumeID) if err != nil { - logrus.Println(err) + dlog.Println(err) return err } @@ -265,43 +191,38 @@ func (d *driver) Delete(volumeID api.VolumeID) error { os.Remove(v.DevicePath) // Delete the directory on the nfs server. - os.RemoveAll(path.Join(nfsMountPath, string(volumeID))) + os.RemoveAll(path.Join(nfsMountPath, volumeID)) err = d.DeleteVol(volumeID) if err != nil { - logrus.Println(err) + dlog.Println(err) return err } return nil } -func (d *driver) Mount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Mount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { - logrus.Println(err) + dlog.Println(err) return err } - srcPath := path.Join(":", d.nfsPath, string(volumeID)) + srcPath := path.Join(":", d.nfsPath, volumeID) mountExists, err := d.mounter.Exists(srcPath, mountpath) if !mountExists { - d.mounter.Unmount(path.Join(nfsMountPath, string(volumeID)), mountpath) - err = d.mounter.Mount(0, path.Join(nfsMountPath, string(volumeID)), mountpath, string(v.Spec.Format), syscall.MS_BIND, "") - if err != nil { - logrus.Printf("Cannot mount %s at %s because %+v", - path.Join(nfsMountPath, string(volumeID)), mountpath, err) + d.mounter.Unmount(path.Join(nfsMountPath, volumeID), mountpath) + if err := d.mounter.Mount(0, path.Join(nfsMountPath, volumeID), mountpath, string(v.Spec.Format), syscall.MS_BIND, ""); err != nil { + dlog.Printf("Cannot mount %s at %s because %+v", path.Join(nfsMountPath, volumeID), mountpath, err) return err } } - v.AttachPath = mountpath - err = d.UpdateVol(v) - - return err + return d.UpdateVol(v) } -func (d *driver) 
Unmount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Unmount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { return err @@ -309,7 +230,7 @@ func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { if v.AttachPath == "" { return fmt.Errorf("Device %v not mounted", volumeID) } - err = d.mounter.Unmount(path.Join(nfsMountPath, string(volumeID)), v.AttachPath) + err = d.mounter.Unmount(path.Join(nfsMountPath, volumeID), v.AttachPath) if err != nil { return err } @@ -318,38 +239,35 @@ func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { return err } -func (d *driver) Snapshot(volumeID api.VolumeID, readonly bool, locator api.VolumeLocator) (api.VolumeID, error) { - volIDs := make([]api.VolumeID, 1) - volIDs[0] = volumeID +func (d *driver) Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator) (string, error) { + volIDs := []string{volumeID} vols, err := d.Inspect(volIDs) if err != nil { - return api.BadVolumeID, nil + return "", nil } source := &api.Source{Parent: volumeID} newVolumeID, err := d.Create(locator, source, vols[0].Spec) if err != nil { - return api.BadVolumeID, nil + return "", nil } // NFS does not support snapshots, so just copy the files. 
- err = copyDir(nfsMountPath+string(volumeID), nfsMountPath+string(newVolumeID)) - if err != nil { + if err := copyDir(nfsMountPath+volumeID, nfsMountPath+newVolumeID); err != nil { d.Delete(newVolumeID) - return api.BadVolumeID, nil + return "", nil } - return newVolumeID, nil } -func (d *driver) Attach(volumeID api.VolumeID) (string, error) { - return path.Join(nfsMountPath, string(volumeID)+nfsBlockFile), nil +func (d *driver) Attach(volumeID string) (string, error) { + return path.Join(nfsMountPath, volumeID+nfsBlockFile), nil } -func (d *driver) Detach(volumeID api.VolumeID) error { +func (d *driver) Detach(volumeID string) error { return nil } -func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error { +func (d *driver) Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error { if spec != nil { return volume.ErrNotSupported } @@ -358,26 +276,89 @@ func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *ap return err } if locator != nil { - v.Locator = *locator + v.Locator = locator } - err = d.UpdateVol(v) - return err + return d.UpdateVol(v) } -func (d *driver) Stats(volumeID api.VolumeID) (api.Stats, error) { - return api.Stats{}, volume.ErrNotSupported +func (d *driver) Stats(volumeID string) (*api.Stats, error) { + return nil, volume.ErrNotSupported } -func (d *driver) Alerts(volumeID api.VolumeID) (api.Alerts, error) { - return api.Alerts{}, volume.ErrNotSupported +func (d *driver) Alerts(volumeID string) (*api.Alerts, error) { + return nil, volume.ErrNotSupported } func (d *driver) Shutdown() { - logrus.Printf("%s Shutting down", Name) + dlog.Printf("%s Shutting down", Name) syscall.Unmount(nfsMountPath, 0) } -func init() { - // Register ourselves as an openstorage volume driver. 
- volume.Register(Name, Init) +func copyFile(source string, dest string) (err error) { + sourcefile, err := os.Open(source) + if err != nil { + return err + } + + defer sourcefile.Close() + + destfile, err := os.Create(dest) + if err != nil { + return err + } + + defer destfile.Close() + + _, err = io.Copy(destfile, sourcefile) + if err == nil { + sourceinfo, err := os.Stat(source) + if err != nil { + err = os.Chmod(dest, sourceinfo.Mode()) + } + + } + + return +} + +func copyDir(source string, dest string) (err error) { + // get properties of source dir + sourceinfo, err := os.Stat(source) + if err != nil { + return err + } + + // create dest dir + + err = os.MkdirAll(dest, sourceinfo.Mode()) + if err != nil { + return err + } + + directory, _ := os.Open(source) + + objects, err := directory.Readdir(-1) + + for _, obj := range objects { + + sourcefilepointer := source + "/" + obj.Name() + + destinationfilepointer := dest + "/" + obj.Name() + + if obj.IsDir() { + // create sub-directories - recursively + err = copyDir(sourcefilepointer, destinationfilepointer) + if err != nil { + fmt.Println(err) + } + } else { + // perform copy + err = copyFile(sourcefilepointer, destinationfilepointer) + if err != nil { + fmt.Println(err) + } + } + + } + return } diff --git a/volume/drivers/nfs/nfs_test.go b/volume/drivers/nfs/nfs_test.go index 5767785b1..23af0e47b 100644 --- a/volume/drivers/nfs/nfs_test.go +++ b/volume/drivers/nfs/nfs_test.go @@ -4,6 +4,7 @@ import ( "os" "testing" + "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/volume" "github.com/libopenstorage/openstorage/volume/drivers/test" ) @@ -27,7 +28,7 @@ func TestAll(t *testing.T) { t.Fatalf("Failed to initialize Volume Driver: %v", err) } ctx := test.NewContext(d) - ctx.Filesystem = "nfs" + ctx.Filesystem = api.FSType_FS_TYPE_NFS test.RunShort(t, ctx) } diff --git a/volume/drivers/pwx/pwx.go b/volume/drivers/pwx/pwx.go index 15606a797..f42fe7c09 100644 --- 
a/volume/drivers/pwx/pwx.go +++ b/volume/drivers/pwx/pwx.go @@ -9,9 +9,8 @@ import ( const ( Name = "pwx" - Type = api.Block + Type = api.DriverType_DRIVER_TYPE_BLOCK DefaultUrl = "unix:///" + config.DriverAPIBase + "pxd.sock" - DefaultVersion = "v1" ) type driver struct { @@ -27,7 +26,7 @@ func Init(params volume.DriverParams) (volume.VolumeDriver, error) { } version, ok := params[config.VersionKey] if !ok { - version = DefaultVersion + version = config.Version } c, err := client.NewClient(url, version) if err != nil { diff --git a/volume/drivers/test/driver.go b/volume/drivers/test/driver.go index 08c096889..5440c8aad 100644 --- a/volume/drivers/test/driver.go +++ b/volume/drivers/test/driver.go @@ -8,23 +8,24 @@ import ( "testing" "time" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/libopenstorage/openstorage/api" "github.com/libopenstorage/openstorage/volume" "github.com/portworx/kvdb" "github.com/portworx/kvdb/mem" - "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) // Context maintains current device state. 
It gets passed into tests // so that tests can build on other tests' work type Context struct { volume.VolumeDriver - volID api.VolumeID - snapID api.VolumeID + volID string + snapID string mountPath string devicePath string - Filesystem string + Filesystem api.FSType testPath string testFile string } @@ -32,9 +33,9 @@ type Context struct { func NewContext(d volume.VolumeDriver) *Context { return &Context{ VolumeDriver: d, - volID: api.BadVolumeID, - snapID: api.BadVolumeID, - Filesystem: string(""), + volID: "", + snapID: "", + Filesystem: api.FSType_FS_TYPE_NONE, testPath: path.Join("/tmp/openstorage/mount/", d.String()), testFile: path.Join("/tmp/", d.String()), } @@ -81,80 +82,80 @@ func create(t *testing.T, ctx *Context) { fmt.Println("create") volID, err := ctx.Create( - api.VolumeLocator{Name: "foo", VolumeLabels: api.Labels{"oh": "create"}}, + &api.VolumeLocator{Name: "foo", VolumeLabels: map[string]string{"oh": "create"}}, nil, &api.VolumeSpec{ Size: 1 * 1024 * 1024 * 1024, - HALevel: 1, - Format: api.Filesystem(ctx.Filesystem), + HaLevel: 1, + Format: ctx.Filesystem, }) - assert.NoError(t, err, "Failed in Create") + require.NoError(t, err, "Failed in Create") ctx.volID = volID } func inspect(t *testing.T, ctx *Context) { fmt.Println("inspect") - vols, err := ctx.Inspect([]api.VolumeID{ctx.volID}) - assert.NoError(t, err, "Failed in Inspect") - assert.NotNil(t, vols, "Nil vols") - assert.Equal(t, len(vols), 1, "Expect 1 volume actual %v volumes", len(vols)) - assert.Equal(t, vols[0].ID, ctx.volID, "Expect volID %v actual %v", ctx.volID, vols[0].ID) + vols, err := ctx.Inspect([]string{ctx.volID}) + require.NoError(t, err, "Failed in Inspect") + require.NotNil(t, vols, "Nil vols") + require.Equal(t, len(vols), 1, "Expect 1 volume actual %v volumes", len(vols)) + require.Equal(t, vols[0].Id, ctx.volID, "Expect volID %v actual %v", ctx.volID, vols[0].Id) - vols, err = ctx.Inspect([]api.VolumeID{api.VolumeID("shouldNotExist")}) - assert.Equal(t, 0, len(vols), 
"Expect 0 volume actual %v volumes", len(vols)) + vols, err = ctx.Inspect([]string{string("shouldNotExist")}) + require.Equal(t, 0, len(vols), "Expect 0 volume actual %v volumes", len(vols)) } func set(t *testing.T, ctx *Context) { fmt.Println("update") - vols, err := ctx.Inspect([]api.VolumeID{ctx.volID}) - assert.NoError(t, err, "Failed in Inspect") - assert.NotNil(t, vols, "Nil vols") - assert.Equal(t, len(vols), 1, "Expect 1 volume actual %v volumes", len(vols)) - assert.Equal(t, vols[0].ID, ctx.volID, "Expect volID %v actual %v", ctx.volID, vols[0].ID) + vols, err := ctx.Inspect([]string{ctx.volID}) + require.NoError(t, err, "Failed in Inspect") + require.NotNil(t, vols, "Nil vols") + require.Equal(t, len(vols), 1, "Expect 1 volume actual %v volumes", len(vols)) + require.Equal(t, vols[0].Id, ctx.volID, "Expect volID %v actual %v", ctx.volID, vols[0].Id) vols[0].Locator.VolumeLabels["UpdateTest"] = "Success" - err = ctx.Set(ctx.volID, &vols[0].Locator, nil) - assert.NoError(t, err, "Failed in Update") - - vols, err = ctx.Inspect([]api.VolumeID{ctx.volID}) - assert.NoError(t, err, "Failed in Inspect") - assert.NotNil(t, vols, "Nil vols") - assert.Equal(t, len(vols), 1, "Expect 1 volume actual %v volumes", len(vols)) - assert.Equal(t, vols[0].Locator.VolumeLabels["UpdateTest"], "Success", + err = ctx.Set(ctx.volID, vols[0].Locator, nil) + require.NoError(t, err, "Failed in Update") + + vols, err = ctx.Inspect([]string{ctx.volID}) + require.NoError(t, err, "Failed in Inspect") + require.NotNil(t, vols, "Nil vols") + require.Equal(t, len(vols), 1, "Expect 1 volume actual %v volumes", len(vols)) + require.Equal(t, vols[0].Locator.VolumeLabels["UpdateTest"], "Success", "Expect Label %v actual %v", "UpdateTest", vols[0].Locator.VolumeLabels) } func enumerate(t *testing.T, ctx *Context) { fmt.Println("enumerate") - vols, err := ctx.Enumerate(api.VolumeLocator{}, nil) - assert.NoError(t, err, "Failed in Enumerate") - assert.NotNil(t, vols, "Nil vols") - assert.Equal(t, 
1, len(vols), "Expect 1 volume actual %v volumes", len(vols)) - assert.Equal(t, vols[0].ID, ctx.volID, "Expect volID %v actual %v", ctx.volID, vols[0].ID) + vols, err := ctx.Enumerate(&api.VolumeLocator{}, nil) + require.NoError(t, err, "Failed in Enumerate") + require.NotNil(t, vols, "Nil vols") + require.Equal(t, 1, len(vols), "Expect 1 volume actual %v volumes", len(vols)) + require.Equal(t, vols[0].Id, ctx.volID, "Expect volID %v actual %v", ctx.volID, vols[0].Id) - vols, err = ctx.Enumerate(api.VolumeLocator{Name: "foo"}, nil) - assert.NoError(t, err, "Failed in Enumerate") - assert.NotNil(t, vols, "Nil vols") - assert.Equal(t, len(vols), 1, "Expect 1 volume actual %v volumes", len(vols)) - assert.Equal(t, vols[0].ID, ctx.volID, "Expect volID %v actual %v", ctx.volID, vols[0].ID) + vols, err = ctx.Enumerate(&api.VolumeLocator{Name: "foo"}, nil) + require.NoError(t, err, "Failed in Enumerate") + require.NotNil(t, vols, "Nil vols") + require.Equal(t, len(vols), 1, "Expect 1 volume actual %v volumes", len(vols)) + require.Equal(t, vols[0].Id, ctx.volID, "Expect volID %v actual %v", ctx.volID, vols[0].Id) - vols, err = ctx.Enumerate(api.VolumeLocator{Name: "shouldNotExist"}, nil) - assert.Equal(t, len(vols), 0, "Expect 0 volume actual %v volumes", len(vols)) + vols, err = ctx.Enumerate(&api.VolumeLocator{Name: "shouldNotExist"}, nil) + require.Equal(t, len(vols), 0, "Expect 0 volume actual %v volumes", len(vols)) } func waitReady(t *testing.T, ctx *Context) error { total := time.Minute * 5 inc := time.Second * 2 elapsed := time.Second * 0 - vols, err := ctx.Inspect([]api.VolumeID{ctx.volID}) - for err == nil && len(vols) == 1 && vols[0].Status != api.Up && elapsed < total { + vols, err := ctx.Inspect([]string{ctx.volID}) + for err == nil && len(vols) == 1 && vols[0].Status != api.VolumeStatus_VOLUME_STATUS_UP && elapsed < total { time.Sleep(inc) elapsed += inc - vols, err = ctx.Inspect([]api.VolumeID{ctx.volID}) + vols, err = ctx.Inspect([]string{ctx.volID}) } if 
err != nil { return err @@ -162,7 +163,7 @@ func waitReady(t *testing.T, ctx *Context) error { if len(vols) != 1 { return fmt.Errorf("Expect one volume from inspect got %v", len(vols)) } - if vols[0].Status != api.Up { + if vols[0].Status != api.VolumeStatus_VOLUME_STATUS_UP { return fmt.Errorf("Timed out waiting for volume status %v", vols) } return err @@ -171,16 +172,16 @@ func waitReady(t *testing.T, ctx *Context) error { func attach(t *testing.T, ctx *Context) { fmt.Println("attach") err := waitReady(t, ctx) - assert.NoError(t, err, "Volume status is not up") + require.NoError(t, err, "Volume status is not up") p, err := ctx.Attach(ctx.volID) if err != nil { - assert.Equal(t, err, volume.ErrNotSupported, "Error on attach %v", err) + require.Equal(t, err, volume.ErrNotSupported, "Error on attach %v", err) } ctx.devicePath = p p, err = ctx.Attach(ctx.volID) if err == nil { - assert.Equal(t, p, ctx.devicePath, "Multiple calls to attach if not errored should return the same path") + require.Equal(t, p, ctx.devicePath, "Multiple calls to attach if not errored should return the same path") } } @@ -188,7 +189,7 @@ func detach(t *testing.T, ctx *Context) { fmt.Println("detach") err := ctx.Detach(ctx.volID) if err != nil { - assert.Equal(t, ctx.devicePath, "", "Error on detach %s: %v", ctx.devicePath, err) + require.Equal(t, ctx.devicePath, "", "Error on detach %s: %v", ctx.devicePath, err) } ctx.devicePath = "" } @@ -199,7 +200,7 @@ func mount(t *testing.T, ctx *Context) { err := os.MkdirAll(ctx.testPath, 0755) err = ctx.Mount(ctx.volID, ctx.testPath) - assert.NoError(t, err, "Failed in mount %v", ctx.testPath) + require.NoError(t, err, "Failed in mount %v", ctx.testPath) ctx.mountPath = ctx.testPath } @@ -214,7 +215,7 @@ func multiMount(t *testing.T, ctx *Context) { attach(t, &ctx2) err := ctx2.Mount(ctx2.volID, ctx2.testPath) - assert.Error(t, err, "Mount of different devices to same path must fail") + require.Error(t, err, "Mount of different devices to same path 
must fail") unmount(t, ctx) detach(t, ctx) @@ -227,10 +228,10 @@ func multiMount(t *testing.T, ctx *Context) { func unmount(t *testing.T, ctx *Context) { fmt.Println("unmount") - assert.NotEqual(t, ctx.mountPath, "", "Device is not mounted") + require.NotEqual(t, ctx.mountPath, "", "Device is not mounted") err := ctx.Unmount(ctx.volID, ctx.mountPath) - assert.NoError(t, err, "Failed in unmount %v", ctx.mountPath) + require.NoError(t, err, "Failed in unmount %v", ctx.mountPath) ctx.mountPath = "" } @@ -242,94 +243,94 @@ func shutdown(t *testing.T, ctx *Context) { func io(t *testing.T, ctx *Context) { fmt.Println("io") - assert.NotEqual(t, ctx.mountPath, "", "Device is not mounted") + require.NotEqual(t, ctx.mountPath, "", "Device is not mounted") cmd := exec.Command("dd", "if=/dev/urandom", fmt.Sprintf("of=%s", ctx.testFile), "bs=1M", "count=10") o, err := cmd.CombinedOutput() - assert.NoError(t, err, "Failed to run dd %s", string(o)) + require.NoError(t, err, "Failed to run dd %s", string(o)) cmd = exec.Command("dd", fmt.Sprintf("if=%s", ctx.testFile), fmt.Sprintf("of=%s/xx", ctx.mountPath)) o, err = cmd.CombinedOutput() - assert.NoError(t, err, "Failed to run dd on mountpoint %s/xx : %s", + require.NoError(t, err, "Failed to run dd on mountpoint %s/xx : %s", ctx.mountPath, string(o)) cmd = exec.Command("diff", ctx.testFile, fmt.Sprintf("%s/xx", ctx.mountPath)) o, err = cmd.CombinedOutput() - assert.NoError(t, err, "data mismatch %s", string(o)) + require.NoError(t, err, "data mismatch %s", string(o)) } func detachBad(t *testing.T, ctx *Context) { err := ctx.Detach(ctx.volID) - assert.True(t, (err == nil || err == volume.ErrNotSupported), + require.True(t, (err == nil || err == volume.ErrNotSupported), "Detach on mounted device should fail") } func deleteBad(t *testing.T, ctx *Context) { fmt.Println("deleteBad") - assert.NotEqual(t, ctx.mountPath, "", "Device is not mounted") + require.NotEqual(t, ctx.mountPath, "", "Device is not mounted") err := 
ctx.Delete(ctx.volID) - assert.Error(t, err, "Delete on mounted device must fail") + require.Error(t, err, "Delete on mounted device must fail") } func delete(t *testing.T, ctx *Context) { fmt.Println("delete") err := ctx.Delete(ctx.volID) - assert.NoError(t, err, "Delete failed") - ctx.volID = api.BadVolumeID + require.NoError(t, err, "Delete failed") + ctx.volID = "" } func snap(t *testing.T, ctx *Context) { fmt.Println("snap") - if ctx.volID == api.BadVolumeID { + if ctx.volID == "" { create(t, ctx) } attach(t, ctx) - labels := api.Labels{"oh": "snap"} - assert.NotEqual(t, ctx.volID, api.BadVolumeID, "invalid volume ID") + labels := map[string]string{"oh": "snap"} + require.NotEqual(t, ctx.volID, "", "invalid volume ID") id, err := ctx.Snapshot(ctx.volID, false, - api.VolumeLocator{Name: "snappy", VolumeLabels: labels}) - assert.NoError(t, err, "Failed in creating a snapshot") + &api.VolumeLocator{Name: "snappy", VolumeLabels: labels}) + require.NoError(t, err, "Failed in creating a snapshot") ctx.snapID = id } func snapInspect(t *testing.T, ctx *Context) { fmt.Println("snapInspect") - snaps, err := ctx.Inspect([]api.VolumeID{ctx.snapID}) - assert.NoError(t, err, "Failed in Inspect") - assert.NotNil(t, snaps, "Nil snaps") - assert.Equal(t, len(snaps), 1, "Expect 1 snaps actual %v snaps", len(snaps)) - assert.Equal(t, snaps[0].ID, ctx.snapID, "Expect snapID %v actual %v", ctx.snapID, snaps[0].ID) + snaps, err := ctx.Inspect([]string{ctx.snapID}) + require.NoError(t, err, "Failed in Inspect") + require.NotNil(t, snaps, "Nil snaps") + require.Equal(t, len(snaps), 1, "Expect 1 snaps actual %v snaps", len(snaps)) + require.Equal(t, snaps[0].Id, ctx.snapID, "Expect snapID %v actual %v", ctx.snapID, snaps[0].Id) - snaps, err = ctx.Inspect([]api.VolumeID{api.VolumeID("shouldNotExist")}) - assert.Equal(t, 0, len(snaps), "Expect 0 snaps actual %v snaps", len(snaps)) + snaps, err = ctx.Inspect([]string{string("shouldNotExist")}) + require.Equal(t, 0, len(snaps), "Expect 0 
snaps actual %v snaps", len(snaps)) } func snapEnumerate(t *testing.T, ctx *Context) { fmt.Println("snapEnumerate") snaps, err := ctx.SnapEnumerate(nil, nil) - assert.NoError(t, err, "Failed in snapEnumerate") - assert.NotNil(t, snaps, "Nil snaps") - assert.Equal(t, 1, len(snaps), "Expect 1 snaps actual %v snaps", len(snaps)) - assert.Equal(t, snaps[0].ID, ctx.snapID, "Expect snapID %v actual %v", ctx.snapID, snaps[0].ID) + require.NoError(t, err, "Failed in snapEnumerate") + require.NotNil(t, snaps, "Nil snaps") + require.Equal(t, 1, len(snaps), "Expect 1 snaps actual %v snaps", len(snaps)) + require.Equal(t, snaps[0].Id, ctx.snapID, "Expect snapID %v actual %v", ctx.snapID, snaps[0].Id) labels := snaps[0].Locator.VolumeLabels - snaps, err = ctx.SnapEnumerate([]api.VolumeID{ctx.volID}, nil) - assert.NoError(t, err, "Failed in snapEnumerate") - assert.NotNil(t, snaps, "Nil snaps") - assert.Equal(t, len(snaps), 1, "Expect 1 snap actual %v snaps", len(snaps)) - assert.Equal(t, snaps[0].ID, ctx.snapID, "Expect snapID %v actual %v", ctx.snapID, snaps[0].ID) + snaps, err = ctx.SnapEnumerate([]string{ctx.volID}, nil) + require.NoError(t, err, "Failed in snapEnumerate") + require.NotNil(t, snaps, "Nil snaps") + require.Equal(t, len(snaps), 1, "Expect 1 snap actual %v snaps", len(snaps)) + require.Equal(t, snaps[0].Id, ctx.snapID, "Expect snapID %v actual %v", ctx.snapID, snaps[0].Id) - snaps, err = ctx.SnapEnumerate([]api.VolumeID{api.VolumeID("shouldNotExist")}, nil) - assert.Equal(t, len(snaps), 0, "Expect 0 snap actual %v snaps", len(snaps)) + snaps, err = ctx.SnapEnumerate([]string{string("shouldNotExist")}, nil) + require.Equal(t, len(snaps), 0, "Expect 0 snap actual %v snaps", len(snaps)) snaps, err = ctx.SnapEnumerate(nil, labels) - assert.NoError(t, err, "Failed in snapEnumerate") - assert.NotNil(t, snaps, "Nil snaps") - assert.Equal(t, len(snaps), 1, "Expect 1 snap actual %v snaps", len(snaps)) - assert.Equal(t, snaps[0].ID, ctx.snapID, "Expect snapID %v actual 
%v", ctx.snapID, snaps[0].ID) + require.NoError(t, err, "Failed in snapEnumerate") + require.NotNil(t, snaps, "Nil snaps") + require.Equal(t, len(snaps), 1, "Expect 1 snap actual %v snaps", len(snaps)) + require.Equal(t, snaps[0].Id, ctx.snapID, "Expect snapID %v actual %v", ctx.snapID, snaps[0].Id) } func snapDiff(t *testing.T, ctx *Context) { @@ -343,10 +344,10 @@ func snapDelete(t *testing.T, ctx *Context) { func init() { kv, err := kvdb.New(mem.Name, "driver_test", []string{}, nil) if err != nil { - logrus.Panicf("Failed to intialize KVDB") + dlog.Panicf("Failed to initialize KVDB") } err = kvdb.SetInstance(kv) if err != nil { - logrus.Panicf("Failed to set KVDB instance") + dlog.Panicf("Failed to set KVDB instance") } } diff --git a/volume/drivers/vfs/vfs.go b/volume/drivers/vfs/vfs.go index 8095c26ad..7fe4dd044 100644 --- a/volume/drivers/vfs/vfs.go +++ b/volume/drivers/vfs/vfs.go @@ -3,24 +3,29 @@ package vfs import ( "fmt" "os" - "path" + "path/filepath" "strings" "syscall" - "time" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/libopenstorage/openstorage/api" + "github.com/libopenstorage/openstorage/config" "github.com/libopenstorage/openstorage/volume" + "github.com/libopenstorage/openstorage/volume/drivers/common" "github.com/pborman/uuid" "github.com/portworx/kvdb" ) const ( Name = "vfs" - Type = api.File - volumeBase = "/var/lib/osd/" + Type = api.DriverType_DRIVER_TYPE_FILE ) +func init() { + volume.Register(Name, Init) +} + type driver struct { *volume.IoNotSupported *volume.DefaultBlockDriver @@ -44,55 +49,45 @@ func (d *driver) Type() api.DriverType { return Type } -func (d *driver) Create(locator api.VolumeLocator, source *api.Source, spec *api.VolumeSpec) (api.VolumeID, error) { - +func (d *driver) Create(locator *api.VolumeLocator, source *api.Source, spec *api.VolumeSpec) (string, error) { volumeID := uuid.New() volumeID = strings.TrimSuffix(volumeID, "\n") - // Create a
directory on the Local machine with this UUID. - err := os.MkdirAll(path.Join(volumeBase, string(volumeID)), 0744) - if err != nil { - logrus.Println(err) - return api.BadVolumeID, err - } - - v := &api.Volume{ - ID: api.VolumeID(volumeID), - Locator: locator, - Ctime: time.Now(), - Spec: spec, - LastScan: time.Now(), - Format: "vfs", - State: api.VolumeAvailable, - Status: api.Up, - DevicePath: path.Join(volumeBase, string(volumeID)), + if err := os.MkdirAll(filepath.Join(config.VolumeBase, string(volumeID)), 0744); err != nil { + dlog.Println(err) + return "", err } - err = d.CreateVol(v) - if err != nil { - return api.BadVolumeID, err + v := common.NewVolume( + volumeID, + api.FSType_FS_TYPE_VFS, + locator, + source, + spec, + ) + v.DevicePath = filepath.Join(config.VolumeBase, volumeID) + + if err := d.CreateVol(v); err != nil { + return "", err } - - err = d.UpdateVol(v) - return v.ID, err - + return v.Id, d.UpdateVol(v) } -func (d *driver) Delete(volumeID api.VolumeID) error { +func (d *driver) Delete(volumeID string) error { // Check if volume exists _, err := d.GetVol(volumeID) if err != nil { - logrus.Println("Volume not found ", err) + dlog.Println("Volume not found ", err) return err } // Delete the directory - os.RemoveAll(path.Join(volumeBase, string(volumeID))) + os.RemoveAll(filepath.Join(config.VolumeBase, string(volumeID))) err = d.DeleteVol(volumeID) if err != nil { - logrus.Println(err) + dlog.Println(err) return err } @@ -102,30 +97,26 @@ func (d *driver) Delete(volumeID api.VolumeID) error { // Mount volume at specified path // Errors ErrEnoEnt, ErrVolDetached may be returned. 
-func (d *driver) Mount(volumeID api.VolumeID, mountpath string) error { - +func (d *driver) Mount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { - logrus.Println(err) + dlog.Println(err) return err } syscall.Unmount(mountpath, 0) - err = syscall.Mount(path.Join(volumeBase, string(volumeID)), mountpath, string(v.Spec.Format), syscall.MS_BIND, "") - if err != nil { - logrus.Printf("Cannot mount %s at %s because %+v", - path.Join(volumeBase, string(volumeID)), mountpath, err) + if err := syscall.Mount(filepath.Join(config.VolumeBase, string(volumeID)), mountpath, string(v.Spec.Format), syscall.MS_BIND, ""); err != nil { + dlog.Printf("Cannot mount %s at %s because %+v", filepath.Join(config.VolumeBase, string(volumeID)), mountpath, err) return err } - v.AttachPath = mountpath + // TODO(pedge): why ignoring error? err = d.UpdateVol(v) - return nil } // Unmount volume at specified path // Errors ErrEnoEnt, ErrVolDetached may be returned. -func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { +func (d *driver) Unmount(volumeID string, mountpath string) error { v, err := d.GetVol(volumeID) if err != nil { return err @@ -133,18 +124,17 @@ func (d *driver) Unmount(volumeID api.VolumeID, mountpath string) error { if v.AttachPath == "" { return fmt.Errorf("Device %v not mounted", volumeID) } - err = syscall.Unmount(v.AttachPath, 0) - if err != nil { + if err := syscall.Unmount(v.AttachPath, 0); err != nil { return err } - v.AttachPath = "" + // TODO(pedge): why ignoring error? err = d.UpdateVol(v) return nil } // Set update volume with specified parameters. 
-func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error { +func (d *driver) Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error { if spec != nil { return volume.ErrNotSupported } @@ -153,20 +143,19 @@ func (d *driver) Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *ap return err } if locator != nil { - v.Locator = *locator + v.Locator = locator } - err = d.UpdateVol(v) - return err + return d.UpdateVol(v) } // Stats Not Supported. -func (d *driver) Stats(volumeID api.VolumeID) (api.Stats, error) { - return api.Stats{}, volume.ErrNotSupported +func (d *driver) Stats(volumeID string) (*api.Stats, error) { + return nil, volume.ErrNotSupported } // Alerts Not Supported. -func (d *driver) Alerts(volumeID api.VolumeID) (api.Alerts, error) { - return api.Alerts{}, volume.ErrNotSupported +func (d *driver) Alerts(volumeID string) (*api.Alerts, error) { + return nil, volume.ErrNotSupported } // Status returns a set of key-value pairs which give low @@ -177,9 +166,5 @@ func (d *driver) Status() [][2]string { // Shutdown and cleanup. func (d *driver) Shutdown() { - logrus.Debugf("%s Shutting down", Name) -} - -func init() { - volume.Register(Name, Init) + dlog.Debugf("%s Shutting down", Name) } diff --git a/volume/enumerator.go b/volume/enumerator.go index 930e4d633..21449a65c 100644 --- a/volume/enumerator.go +++ b/volume/enumerator.go @@ -3,6 +3,7 @@ package volume import ( "encoding/json" "fmt" + // TODO(pedge): what is this for? _ "sync" "github.com/portworx/kvdb" @@ -11,14 +12,12 @@ import ( ) const ( - keyBase = "openstorage/" - locks = "/locks/" - volumes = "/volumes/" + keyBase = "openstorage" ) type Store interface { - // Lock volume specified by volID. - Lock(volID api.VolumeID) (interface{}, error) + // Lock volume specified by volumeID. + Lock(volumeID string) (interface{}, error) // Lock volume with token obtained from call to Lock. 
Unlock(token interface{}) error @@ -26,67 +25,20 @@ type Store interface { // CreateVol returns error if volume with the same ID already existe. CreateVol(vol *api.Volume) error - // GetVol from volID. - GetVol(volID api.VolumeID) (*api.Volume, error) + // GetVol from volumeID. + GetVol(volumeID string) (*api.Volume, error) // UpdateVol with vol UpdateVol(vol *api.Volume) error // DeleteVol. Returns error if volume does not exist. - DeleteVol(volID api.VolumeID) error + DeleteVol(volumeID string) error } // DefaultEnumerator for volume information. Implements the Enumerator Interface type DefaultEnumerator struct { kvdb kvdb.Kvdb driver string - lockKeyPrefix string - volKeyPrefix string -} - -func (e *DefaultEnumerator) lockKey(volID api.VolumeID) string { - return e.volKeyPrefix + string(volID) + ".lock" -} - -func (e *DefaultEnumerator) volKey(volID api.VolumeID) string { - return e.volKeyPrefix + string(volID) -} - -func hasSubset(set api.Labels, subset api.Labels) bool { - if subset == nil || len(subset) == 0 { - return true - } - if set == nil { - return false - } - for k := range subset { - if _, ok := set[k]; !ok { - return false - } - } - return true -} - -func contains(volID api.VolumeID, set []api.VolumeID) bool { - if len(set) == 0 { - return true - } - for _, v := range set { - if v == volID { - return true - } - } - return false -} - -func match(v *api.Volume, locator api.VolumeLocator, configLabels api.Labels) bool { - if locator.Name != "" && v.Locator.Name != locator.Name { - return false - } - if !hasSubset(v.Locator.VolumeLabels, locator.VolumeLabels) { - return false - } - return hasSubset(v.Spec.ConfigLabels, configLabels) } // NewDefaultEnumerator initializes store with specified kvdb. 
@@ -94,14 +46,12 @@ func NewDefaultEnumerator(driver string, kvdb kvdb.Kvdb) *DefaultEnumerator { return &DefaultEnumerator{ kvdb: kvdb, driver: driver, - lockKeyPrefix: keyBase + driver + locks, - volKeyPrefix: keyBase + driver + volumes, } } -// Lock volume specified by volID. -func (e *DefaultEnumerator) Lock(volID api.VolumeID) (interface{}, error) { - return e.kvdb.Lock(e.lockKey(volID), 10) +// Lock volume specified by volumeID. +func (e *DefaultEnumerator) Lock(volumeID string) (interface{}, error) { + return e.kvdb.Lock(e.lockKey(volumeID), 10) } // Lock volume with token obtained from call to Lock. @@ -115,94 +65,149 @@ func (e *DefaultEnumerator) Unlock(token interface{}) error { // CreateVol returns error if volume with the same ID already existe. func (e *DefaultEnumerator) CreateVol(vol *api.Volume) error { - _, err := e.kvdb.Create(e.volKey(vol.ID), vol, 0) + _, err := e.kvdb.Create(e.volKey(vol.Id), vol, 0) return err } -// GetVol from volID. -func (e *DefaultEnumerator) GetVol(volID api.VolumeID) (*api.Volume, error) { +// GetVol from volumeID. +func (e *DefaultEnumerator) GetVol(volumeID string) (*api.Volume, error) { var v api.Volume - _, err := e.kvdb.GetVal(e.volKey(volID), &v) - + _, err := e.kvdb.GetVal(e.volKey(volumeID), &v) return &v, err } // UpdateVol with vol func (e *DefaultEnumerator) UpdateVol(vol *api.Volume) error { - _, err := e.kvdb.Put(e.volKey(vol.ID), vol, 0) + _, err := e.kvdb.Put(e.volKey(vol.Id), vol, 0) return err } // DeleteVol. Returns error if volume does not exist. -func (e *DefaultEnumerator) DeleteVol(volID api.VolumeID) error { - _, err := e.kvdb.Delete(e.volKey(volID)) +func (e *DefaultEnumerator) DeleteVol(volumeID string) error { + _, err := e.kvdb.Delete(e.volKey(volumeID)) return err } // Inspect specified volumes. // Returns slice of volumes that were found. 
-func (e *DefaultEnumerator) Inspect(ids []api.VolumeID) ([]api.Volume, error) { - var err error - var vol *api.Volume - vols := make([]api.Volume, 0, len(ids)) - - for _, v := range ids { - vol, err = e.GetVol(v) +func (e *DefaultEnumerator) Inspect(ids []string) ([]*api.Volume, error) { + volumes := make([]*api.Volume, 0, len(ids)) + for _, id := range ids { + volume, err := e.GetVol(id) // XXX Distinguish between ENOENT and an internal error from KVDB if err != nil { continue } - vols = append(vols, *vol) + volumes = append(volumes, volume) } - return vols, nil + return volumes, nil } // Enumerate volumes that map to the volumeLocator. Locator fields may be regexp. // If locator fields are left blank, this will return all volumee. -func (e *DefaultEnumerator) Enumerate(locator api.VolumeLocator, - labels api.Labels) ([]api.Volume, error) { +func (e *DefaultEnumerator) Enumerate( + locator *api.VolumeLocator, + labels map[string]string, +) ([]*api.Volume, error) { - kvp, err := e.kvdb.Enumerate(e.volKeyPrefix) + kvp, err := e.kvdb.Enumerate(e.volKeyPrefix()) if err != nil { return nil, err } - vols := make([]api.Volume, 0, len(kvp)) + volumes := make([]*api.Volume, 0, len(kvp)) for _, v := range kvp { - var elem api.Volume - err = json.Unmarshal(v.Value, &elem) - if err != nil { + elem := &api.Volume{} + if err := json.Unmarshal(v.Value, elem); err != nil { return nil, err } - if match(&elem, locator, labels) { - vols = append(vols, elem) + if match(elem, locator, labels) { + volumes = append(volumes, elem) } } - return vols, nil + return volumes, nil } // SnapEnumerate for specified volume func (e *DefaultEnumerator) SnapEnumerate( - volIDs []api.VolumeID, - labels api.Labels) ([]api.Volume, error) { - kvp, err := e.kvdb.Enumerate(e.volKeyPrefix) + volumeIDs []string, + labels map[string]string, +) ([]*api.Volume, error) { + kvp, err := e.kvdb.Enumerate(e.volKeyPrefix()) if err != nil { return nil, err } - vols := make([]api.Volume, 0, len(kvp)) + volumes := 
make([]*api.Volume, 0, len(kvp)) for _, v := range kvp { - var elem api.Volume - err = json.Unmarshal(v.Value, &elem) - if err != nil { + elem := &api.Volume{} + if err := json.Unmarshal(v.Value, elem); err != nil { return nil, err } if elem.Source == nil || - elem.Source.Parent == api.BadVolumeID || - (volIDs != nil && !contains(elem.Source.Parent, volIDs)) { + elem.Source.Parent == "" || + (volumeIDs != nil && !contains(elem.Source.Parent, volumeIDs)) { continue } if hasSubset(elem.Locator.VolumeLabels, labels) { - vols = append(vols, elem) + volumes = append(volumes, elem) } } - return vols, nil + return volumes, nil +} + +func (e *DefaultEnumerator) lockKey(volumeID string) string { + return e.volKeyPrefix() + volumeID + ".lock" +} + +func (e *DefaultEnumerator) volKey(volumeID string) string { + return e.volKeyPrefix() + volumeID +} + +// TODO(pedge): not used - bug? +func (d *DefaultEnumerator) lockKeyPrefix() string { + return fmt.Sprintf("%s/%s/locks/", keyBase, d.driver) +} + +func (d *DefaultEnumerator) volKeyPrefix() string { + return fmt.Sprintf("%s/%s/volumes/", keyBase, d.driver) +} + +func hasSubset(set map[string]string, subset map[string]string) bool { + if subset == nil || len(subset) == 0 { + return true + } + if set == nil { + return false + } + for k := range subset { + if _, ok := set[k]; !ok { + return false + } + } + return true +} + +func contains(volumeID string, set []string) bool { + if len(set) == 0 { + return true + } + for _, v := range set { + if v == volumeID { + return true + } + } + return false +} + +func match( + v *api.Volume, + locator *api.VolumeLocator, + configLabels map[string]string, +) bool { + if locator.Name != "" && v.Locator.Name != locator.Name { + return false + } + if !hasSubset(v.Locator.VolumeLabels, locator.VolumeLabels) { + return false + } + return hasSubset(v.Spec.ConfigLabels, configLabels) } diff --git a/volume/enumerator_test.go b/volume/enumerator_test.go index 2fe65318b..33cf3c03e 100644 --- 
a/volume/enumerator_test.go +++ b/volume/enumerator_test.go @@ -3,7 +3,8 @@ package volume import ( "testing" - "github.com/Sirupsen/logrus" + "go.pedge.io/dlog" + "github.com/libopenstorage/openstorage/api" "github.com/portworx/kvdb" "github.com/portworx/kvdb/mem" @@ -11,134 +12,124 @@ import ( ) var ( - e *DefaultEnumerator - volName = "TestVolume" - snapName = "SnapVolume" - labels = api.Labels{"Foo": "DEADBEEF"} + testEnumerator *DefaultEnumerator + testLabels = map[string]string{"Foo": "DEADBEEF"} ) -func TestInspect(t *testing.T) { - id := api.VolumeID(volName) - vol := api.Volume{ - ID: id, - Locator: api.VolumeLocator{Name: volName, VolumeLabels: labels}, - State: api.VolumeAvailable, - Spec: &api.VolumeSpec{}, +func init() { + kv, err := kvdb.New(mem.Name, "driver_test", []string{}, nil) + if err != nil { + dlog.Panicf("Failed to initialize KVDB") + } + if err := kvdb.SetInstance(kv); err != nil { + dlog.Panicf("Failed to set KVDB instance") } - err := e.CreateVol(&vol) + testEnumerator = NewDefaultEnumerator("enumerator_test", kv) +} + +func TestInspect(t *testing.T) { + volume := newTestVolume("TestVolume") + err := testEnumerator.CreateVol(volume) assert.NoError(t, err, "Failed in CreateVol") - vols, err := e.Inspect([]api.VolumeID{id}) + volumes, err := testEnumerator.Inspect([]string{volume.Id}) assert.NoError(t, err, "Failed in Inspect") - assert.Equal(t, len(vols), 1, "Number of volumes returned in inspect should be 1") - if len(vols) == 1 { - assert.Equal(t, vols[0].ID, vol.ID, "Invalid volume returned in Inspect") + assert.Equal(t, len(volumes), 1, "Number of volumes returned in inspect should be 1") + if len(volumes) == 1 { + assert.Equal(t, volumes[0].Id, volume.Id, "Invalid volume returned in Inspect") } - err = e.DeleteVol(id) + err = testEnumerator.DeleteVol(volume.Id) assert.NoError(t, err, "Failed in Delete") - vols, err = e.Inspect([]api.VolumeID{id}) - assert.NotNil(t, vols, "Inspect returned nil vols") - 
assert.Equal(t, len(vols), 0, "Number of volumes returned in inspect should be 0") + volumes, err = testEnumerator.Inspect([]string{volume.Id}) + assert.NotNil(t, volumes, "Inspect returned nil volumes") + assert.Equal(t, len(volumes), 0, "Number of volumes returned in inspect should be 0") } func TestEnumerate(t *testing.T) { - id := api.VolumeID(volName) - vol := api.Volume{ - ID: id, - Locator: api.VolumeLocator{Name: volName, VolumeLabels: labels}, - State: api.VolumeAvailable, - Spec: &api.VolumeSpec{}, - } - err := e.CreateVol(&vol) + volume := newTestVolume("TestVolume") + err := testEnumerator.CreateVol(volume) assert.NoError(t, err, "Failed in CreateVol") - vols, err := e.Enumerate(api.VolumeLocator{}, nil) + volumes, err := testEnumerator.Enumerate(&api.VolumeLocator{}, nil) assert.NoError(t, err, "Failed in Enumerate") - assert.Equal(t, 1, len(vols), "Number of volumes returned in enumerate should be 1") + assert.Equal(t, 1, len(volumes), "Number of volumes returned in enumerate should be 1") - vols, err = e.Enumerate(api.VolumeLocator{Name: volName}, nil) + volumes, err = testEnumerator.Enumerate(&api.VolumeLocator{Name: volume.Id}, nil) assert.NoError(t, err, "Failed in Enumerate") - assert.Equal(t, 1, len(vols), "Number of volumes returned in enumerate should be 1") - if len(vols) == 1 { - assert.Equal(t, vols[0].ID, vol.ID, "Invalid volume returned in Enumerate") + assert.Equal(t, 1, len(volumes), "Number of volumes returned in enumerate should be 1") + if len(volumes) == 1 { + assert.Equal(t, volumes[0].Id, volume.Id, "Invalid volume returned in Enumerate") } - vols, err = e.Enumerate(api.VolumeLocator{VolumeLabels: labels}, nil) + volumes, err = testEnumerator.Enumerate(&api.VolumeLocator{VolumeLabels: testLabels}, nil) assert.NoError(t, err, "Failed in Enumerate") - assert.Equal(t, len(vols), 1, "Number of volumes returned in enumerate should be 1") - if len(vols) == 1 { - assert.Equal(t, vols[0].ID, vol.ID, "Invalid volume returned in Enumerate") 
+ assert.Equal(t, len(volumes), 1, "Number of volumes returned in enumerate should be 1") + if len(volumes) == 1 { + assert.Equal(t, volumes[0].Id, volume.Id, "Invalid volume returned in Enumerate") } - err = e.DeleteVol(id) + err = testEnumerator.DeleteVol(volume.Id) assert.NoError(t, err, "Failed in Delete") - vols, err = e.Enumerate(api.VolumeLocator{Name: volName}, nil) - assert.Equal(t, len(vols), 0, "Number of volumes returned in enumerate should be 0") + volumes, err = testEnumerator.Enumerate(&api.VolumeLocator{Name: volume.Id}, nil) + assert.Equal(t, len(volumes), 0, "Number of volumes returned in enumerate should be 0") } func TestSnapEnumerate(t *testing.T) { - snapID := api.VolumeID(snapName) - id := api.VolumeID(volName) - vol := api.Volume{ - ID: id, - Locator: api.VolumeLocator{Name: volName, VolumeLabels: labels}, - State: api.VolumeAvailable, - Spec: &api.VolumeSpec{}, - } - err := e.CreateVol(&vol) + vol := newTestVolume("TestVolume") + err := testEnumerator.CreateVol(vol) assert.NoError(t, err, "Failed in CreateVol") - snap := api.Volume{ - ID: snapID, - Locator: api.VolumeLocator{Name: volName, VolumeLabels: labels}, - State: api.VolumeAvailable, - Spec: &api.VolumeSpec{}, - Source: &api.Source{Parent: id}, - } - err = e.CreateVol(&snap) + snap := newSnapVolume("SnapVolume", "TestVolume") + err = testEnumerator.CreateVol(snap) assert.NoError(t, err, "Failed in CreateSnap") - snaps, err := e.SnapEnumerate([]api.VolumeID{id}, nil) + snaps, err := testEnumerator.SnapEnumerate([]string{vol.Id}, nil) assert.NoError(t, err, "Failed in Enumerate") assert.Equal(t, len(snaps), 1, "Number of snaps returned in enumerate should be 1") if len(snaps) == 1 { - assert.Equal(t, snaps[0].ID, snap.ID, "Invalid snap returned in Enumerate") + assert.Equal(t, snaps[0].Id, snap.Id, "Invalid snap returned in Enumerate") } - snaps, err = e.SnapEnumerate([]api.VolumeID{id}, labels) + snaps, err = testEnumerator.SnapEnumerate([]string{vol.Id}, testLabels) 
assert.NoError(t, err, "Failed in Enumerate") assert.Equal(t, len(snaps), 1, "Number of snaps returned in enumerate should be 1") if len(snaps) == 1 { - assert.Equal(t, snaps[0].ID, snap.ID, "Invalid snap returned in Enumerate") + assert.Equal(t, snaps[0].Id, snap.Id, "Invalid snap returned in Enumerate") } - snaps, err = e.SnapEnumerate(nil, labels) + snaps, err = testEnumerator.SnapEnumerate(nil, testLabels) assert.NoError(t, err, "Failed in Enumerate") assert.True(t, len(snaps) >= 1, "Number of snaps returned in enumerate should be at least 1") if len(snaps) == 1 { - assert.Equal(t, snaps[0].ID, snap.ID, "Invalid snap returned in Enumerate") + assert.Equal(t, snaps[0].Id, snap.Id, "Invalid snap returned in Enumerate") } - snaps, err = e.SnapEnumerate(nil, nil) + snaps, err = testEnumerator.SnapEnumerate(nil, nil) assert.NoError(t, err, "Failed in Enumerate") assert.True(t, len(snaps) >= 1, "Number of snaps returned in enumerate should be at least 1") if len(snaps) == 1 { - assert.Equal(t, snaps[0].ID, snap.ID, "Invalid snap returned in Enumerate") + assert.Equal(t, snaps[0].Id, snap.Id, "Invalid snap returned in Enumerate") } - err = e.DeleteVol(snapID) + err = testEnumerator.DeleteVol(snap.Id) assert.NoError(t, err, "Failed in Delete") - snaps, err = e.SnapEnumerate([]api.VolumeID{id}, labels) + snaps, err = testEnumerator.SnapEnumerate([]string{vol.Id}, testLabels) assert.NotNil(t, snaps, "Inspect returned nil snaps") assert.Equal(t, len(snaps), 0, "Number of snaps returned in enumerate should be 0") - err = e.DeleteVol(id) + err = testEnumerator.DeleteVol(vol.Id) assert.NoError(t, err, "Failed in Delete") } -func init() { - kv, err := kvdb.New(mem.Name, "driver_test", []string{}, nil) - if err != nil { - logrus.Panicf("Failed to intialize KVDB") - } - err = kvdb.SetInstance(kv) - if err != nil { - logrus.Panicf("Failed to set KVDB instance") +func newTestVolume(id string) *api.Volume { + return &api.Volume{ + Id: id, + Locator: &api.VolumeLocator{Name: id, 
VolumeLabels: testLabels}, + State: api.VolumeState_VOLUME_STATE_AVAILABLE, + Spec: &api.VolumeSpec{}, } +} - e = NewDefaultEnumerator("enumerator_test", kv) +func newSnapVolume(snapID string, volumeID string) *api.Volume { + return &api.Volume{ + Id: snapID, + Locator: &api.VolumeLocator{Name: volumeID, VolumeLabels: testLabels}, + State: api.VolumeState_VOLUME_STATE_AVAILABLE, + Spec: &api.VolumeSpec{}, + Source: &api.Source{Parent: volumeID}, + } } diff --git a/volume/io_not_supported.go b/volume/io_not_supported.go index 066a56f3e..f1c38cf3e 100644 --- a/volume/io_not_supported.go +++ b/volume/io_not_supported.go @@ -1,23 +1,25 @@ package volume -import "github.com/libopenstorage/openstorage/api" +type IoNotSupported struct {} -type IoNotSupported struct { -} - -func (io *IoNotSupported) Read(volumeID api.VolumeID, - buf []byte, - sz uint64, - offset int64) (int64, error) { +func (io *IoNotSupported) Read( + volumeID string, + buffer []byte, + size uint64, + offset int64, +) (int64, error) { return 0, ErrNotSupported } -func (io *IoNotSupported) Write(volumeID api.VolumeID, - buf []byte, - sz uint64, - offset int64) (int64, error) { +func (io *IoNotSupported) Write( + volumeID string, + buffer []byte, + size uint64, + offset int64, +) (int64, error) { return 0, ErrNotSupported } -func (io *IoNotSupported) Flush(volumeID api.VolumeID) error { + +func (io *IoNotSupported) Flush(volumeID string) error { return ErrNotSupported } diff --git a/volume/snapshot.go b/volume/snapshot.go index 270996b1d..764405a21 100644 --- a/volume/snapshot.go +++ b/volume/snapshot.go @@ -2,9 +2,12 @@ package volume import "github.com/libopenstorage/openstorage/api" -type SnapshotNotSupported struct { -} +type SnapshotNotSupported struct {} -func (s *SnapshotNotSupported) Snapshot(volumeID api.VolumeID, readonly bool, locator api.VolumeLocator) (api.VolumeID, error) { - return api.BadVolumeID, ErrNotSupported +func (s *SnapshotNotSupported) Snapshot( + volumeID string, + 
readonly bool, + locator *api.VolumeLocator, +) (string, error) { + return "", ErrNotSupported } diff --git a/volume/volume.go b/volume/volume.go index 601825989..adc7b8fb0 100644 --- a/volume/volume.go +++ b/volume/volume.go @@ -8,9 +8,6 @@ import ( ) var ( - instances map[string]VolumeDriver - drivers map[string]InitFunc - mutex sync.Mutex ErrExist = errors.New("Driver already exists") ErrDriverNotFound = errors.New("Driver implementation not found") ErrDriverInitializing = errors.New("Driver is initializing") @@ -21,8 +18,12 @@ var ( ErrVolAttached = errors.New("Volume is attached") ErrVolHasSnaps = errors.New("Volume has snapshots associated") ErrNotSupported = errors.New("Operation not supported") + instances map[string]VolumeDriver + drivers map[string]InitFunc + mutex sync.Mutex ) +// TODO(pedge): remove type DriverParams map[string]string type InitFunc func(params DriverParams) (VolumeDriver, error) @@ -41,21 +42,25 @@ type VolumeDriver interface { type IODriver interface { // Read sz bytes from specified volume at specified offset. // Return number of bytes read and error. - Read(volumeID api.VolumeID, + Read( + volumeID string, buf []byte, sz uint64, - offset int64) (int64, error) + offset int64, + ) (int64, error) // Write sz bytes from specified volume at specified offset. // Return number of bytes written and error. - Write(volumeID api.VolumeID, + Write( + volumeID string, buf []byte, sz uint64, - offset int64) (int64, error) + offset int64, + ) (int64, error) // Flush writes to stable storage. // Return error. - Flush(volumeID api.VolumeID) error + Flush(volumeID string) error } // ProtoDriver must be implemented by all volume drivers. It specifies the @@ -69,37 +74,39 @@ type ProtoDriver interface { // Create a new Vol for the specific volume spec. 
// It returns a system generated VolumeID that uniquely identifies the volume - Create(locator api.VolumeLocator, + Create( + locator *api.VolumeLocator, Source *api.Source, - spec *api.VolumeSpec) (api.VolumeID, error) + spec *api.VolumeSpec, + ) (string, error) // Delete volume. // Errors ErrEnoEnt, ErrVolHasSnaps may be returned. - Delete(volumeID api.VolumeID) error + Delete(volumeID string) error // Mount volume at specified path // Errors ErrEnoEnt, ErrVolDetached may be returned. - Mount(volumeID api.VolumeID, mountpath string) error + Mount(volumeID string, mountPath string) error // Unmount volume at specified path // Errors ErrEnoEnt, ErrVolDetached may be returned. - Unmount(volumeID api.VolumeID, mountpath string) error + Unmount(volumeID string, mountPath string) error // Update not all fields of the spec are supported, ErrNotSupported will be thrown for unsupported // updates. - Set(volumeID api.VolumeID, locator *api.VolumeLocator, spec *api.VolumeSpec) error + Set(volumeID string, locator *api.VolumeLocator, spec *api.VolumeSpec) error // Snapshot create volume snapshot. // Errors ErrEnoEnt may be returned - Snapshot(volumeID api.VolumeID, readonly bool, locator api.VolumeLocator) (api.VolumeID, error) + Snapshot(volumeID string, readonly bool, locator *api.VolumeLocator) (string, error) // Stats for specified volume. // Errors ErrEnoEnt may be returned - Stats(volumeID api.VolumeID) (api.Stats, error) + Stats(volumeID string) (*api.Stats, error) // Alerts on this volume. // Errors ErrEnoEnt may be returned - Alerts(volumeID api.VolumeID) (api.Alerts, error) + Alerts(volumeID string) (*api.Alerts, error) // Status returns a set of key-value pairs which give low // level diagnostic status about this driver. @@ -113,14 +120,14 @@ type ProtoDriver interface { type Enumerator interface { // Inspect specified volumes. // Returns slice of volumes that were found. 
- Inspect(volumeIDs []api.VolumeID) ([]api.Volume, error) + Inspect(volumeIDs []string) ([]*api.Volume, error) // Enumerate volumes that map to the volumeLocator. Locator fields may be regexp. // If locator fields are left blank, this will return all volumes. - Enumerate(locator api.VolumeLocator, labels api.Labels) ([]api.Volume, error) + Enumerate(locator *api.VolumeLocator, labels map[string]string) ([]*api.Volume, error) // Enumerate snaps for specified volumes - SnapEnumerate(volID []api.VolumeID, snapLabels api.Labels) ([]api.Volume, error) + SnapEnumerate(volID []string, snapLabels map[string]string) ([]*api.Volume, error) } // BlockDriver needs to be implemented by block volume drivers. Filesystem volume @@ -129,11 +136,11 @@ type BlockDriver interface { // Attach map device to the host. // On success the devicePath specifies location where the device is exported // Errors ErrEnoEnt, ErrVolAttached may be returned. - Attach(volumeID api.VolumeID) (string, error) + Attach(volumeID string) (string, error) // Detach device from the host. // Errors ErrEnoEnt, ErrVolDetached may be returned. - Detach(volumeID api.VolumeID) error + Detach(volumeID string) error } func Shutdown() {