diff --git a/src/stackdriver-nozzle/.envrc.template b/src/stackdriver-nozzle/.envrc.template
index f5d641de..972f8648 100644
--- a/src/stackdriver-nozzle/.envrc.template
+++ b/src/stackdriver-nozzle/.envrc.template
@@ -1,7 +1,7 @@
 # http://direnv.net
 export FIREHOSE_ENDPOINT=https://api.bosh-lite.com
-export FIREHOSE_EVENTS_TO_STACKDRIVER_LOGGING=HttpStart,HttpStop,HttpStartStop,LogMessage,ValueMetric,CounterEvent,Error,ContainerMetric
+export FIREHOSE_EVENTS_TO_STACKDRIVER_LOGGING=HttpStartStop,LogMessage,ValueMetric,CounterEvent,Error,ContainerMetric
 export FIREHOSE_EVENTS_TO_STACKDRIVER_MONITORING=ValueMetric,CounterEvent,ContainerMetric
 export FIREHOSE_USERNAME=admin
 export FIREHOSE_PASSWORD=admin
diff --git a/src/stackdriver-nozzle/README.md b/src/stackdriver-nozzle/README.md
index b2f52f42..7466a378 100644
--- a/src/stackdriver-nozzle/README.md
+++ b/src/stackdriver-nozzle/README.md
@@ -21,7 +21,7 @@ go get github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzl
 - `FIREHOSE_ENDPOINT` - the CF API endpoint; e.g., `https://api.bosh-lite.com`
 - `FIREHOSE_EVENTS_TO_STACKDRIVER_LOGGING` - comma-separated list of events to pass to
   Stackdriver Logging; valid events are `LogMessage`, `ValueMetric`, `CounterEvent`, `Error`,
-  `ContainerMetric`, `HttpStart`, `HttpStop`, and `HttpStartStop`
+  `ContainerMetric`, and `HttpStartStop`
 - `FIREHOSE_EVENTS_TO_STACKDRIVER_MONITORING`- comma-separated list of events to pass to
   Stackdriver Monitoring; valid events are `ValueMetric`, `CounterEvent`, and `ContainerMetric`
 - `FIREHOSE_USERNAME` - CF username; defaults to `admin`
diff --git a/src/stackdriver-nozzle/cloudfoundry/app_info_repository.go b/src/stackdriver-nozzle/cloudfoundry/app_info_repository.go
index 3c842296..01722306 100644
--- a/src/stackdriver-nozzle/cloudfoundry/app_info_repository.go
+++ b/src/stackdriver-nozzle/cloudfoundry/app_info_repository.go
@@ -30,15 +30,17 @@ type appInfoRepository struct {
 func (air *appInfoRepository) GetAppInfo(guid string) AppInfo {
 	appInfo, ok := air.cache[guid]
 	if !ok {
-		app := air.cfClient.AppByGuid(guid)
-		appInfo = AppInfo{
-			AppName:   app.Name,
-			SpaceGUID: app.SpaceData.Entity.Guid,
-			SpaceName: app.SpaceData.Entity.Name,
-			OrgGUID:   app.SpaceData.Entity.OrgData.Entity.Guid,
-			OrgName:   app.SpaceData.Entity.OrgData.Entity.Name,
+		app, err := air.cfClient.AppByGuid(guid)
+		if err == nil {
+			appInfo = AppInfo{
+				AppName:   app.Name,
+				SpaceGUID: app.SpaceData.Entity.Guid,
+				SpaceName: app.SpaceData.Entity.Name,
+				OrgGUID:   app.SpaceData.Entity.OrgData.Entity.Guid,
+				OrgName:   app.SpaceData.Entity.OrgData.Entity.Name,
+			}
+			air.cache[guid] = appInfo
 		}
-		air.cache[guid] = appInfo
 	}
 	return appInfo
 }
diff --git a/src/stackdriver-nozzle/cloudfoundry/firehose.go b/src/stackdriver-nozzle/cloudfoundry/firehose.go
index 98a371ad..038303f6 100644
--- a/src/stackdriver-nozzle/cloudfoundry/firehose.go
+++ b/src/stackdriver-nozzle/cloudfoundry/firehose.go
@@ -68,8 +68,8 @@ func (ct *cfClientTokenRefresh) RefreshAuthToken() (token string, err error) {
 	//
 	// TODO: Track https://github.com/cloudfoundry-community/go-cfclient/issues/34 for
 	// updates on proper refresh token handling.
- token = ct.cfClient.GetToken() - if token == "" { + token, err = ct.cfClient.GetToken() + if token == "" && err == nil { err = fmt.Errorf("Fatal: error getting refresh token") } return diff --git a/src/stackdriver-nozzle/main.go b/src/stackdriver-nozzle/main.go index a193bf3a..9e331178 100644 --- a/src/stackdriver-nozzle/main.go +++ b/src/stackdriver-nozzle/main.go @@ -142,7 +142,10 @@ func newApp() *app { Username: c.Username, Password: c.Password, SkipSslValidation: c.SkipSSL} - cfClient := cfclient.NewClient(cfConfig) + cfClient, err := cfclient.NewClient(cfConfig) + if err != nil { + logger.Error("cfClient", err) + } var appInfoRepository cloudfoundry.AppInfoRepository if c.ResolveAppMetadata { diff --git a/src/stackdriver-nozzle/nozzle/filter_sink_test.go b/src/stackdriver-nozzle/nozzle/filter_sink_test.go index 0a29297e..445f845b 100644 --- a/src/stackdriver-nozzle/nozzle/filter_sink_test.go +++ b/src/stackdriver-nozzle/nozzle/filter_sink_test.go @@ -17,8 +17,6 @@ var _ = Describe("SinkFilter", func() { BeforeEach(func() { allEventTypes = []events.Envelope_EventType{ - events.Envelope_HttpStart, - events.Envelope_HttpStop, events.Envelope_HttpStartStop, events.Envelope_LogMessage, events.Envelope_ValueMetric, diff --git a/src/stackdriver-nozzle/nozzle/metric_sink_test.go b/src/stackdriver-nozzle/nozzle/metric_sink_test.go index b7c562bf..1a6e8bca 100644 --- a/src/stackdriver-nozzle/nozzle/metric_sink_test.go +++ b/src/stackdriver-nozzle/nozzle/metric_sink_test.go @@ -25,6 +25,7 @@ import ( "github.com/cloudfoundry/sonde-go/events" . "github.com/onsi/ginkgo" . "github.com/onsi/gomega" + . "github.com/onsi/gomega/gstruct" ) type mockUnitParser struct { @@ -77,13 +78,15 @@ var _ = Describe("MetricSink", func() { Expect(err).To(BeNil()) metrics := metricBuffer.PostedMetrics - Expect(metrics).To(ConsistOf(stackdriver.Metric{ - Name: "valueMetricName", - Value: 123.456, - Labels: labels, - EventTime: eventTime, - Unit: "{foo}", + Expect(metrics).To(HaveLen(1)) + Expect(metrics[0]).To(MatchAllFields(Fields{ + "Name": Equal("valueMetricName"), + "Value": Equal(123.456), + "Labels": Equal(labels), + "EventTime": Ignore(), + "Unit": Equal("{foo}"), })) + Expect(metrics[0].EventTime.UnixNano()).To(Equal(timeStamp)) Expect(unitParser.lastInput).To(Equal("barUnit")) }) @@ -123,12 +126,18 @@ var _ = Describe("MetricSink", func() { metrics := metricBuffer.PostedMetrics Expect(metrics).To(HaveLen(6)) - Expect(metrics).To(ContainElement(stackdriver.Metric{Name: "diskBytesQuota", Value: float64(1073741824), Labels: labels, EventTime: eventTime, Unit: ""})) - Expect(metrics).To(ContainElement(stackdriver.Metric{Name: "instanceIndex", Value: float64(0), Labels: labels, EventTime: eventTime, Unit: ""})) - Expect(metrics).To(ContainElement(stackdriver.Metric{Name: "cpuPercentage", Value: 0.061651273460637, Labels: labels, EventTime: eventTime, Unit: ""})) - Expect(metrics).To(ContainElement(stackdriver.Metric{Name: "diskBytes", Value: float64(164634624), Labels: labels, EventTime: eventTime, Unit: ""})) - Expect(metrics).To(ContainElement(stackdriver.Metric{Name: "memoryBytes", Value: float64(16601088), Labels: labels, EventTime: eventTime, Unit: ""})) - Expect(metrics).To(ContainElement(stackdriver.Metric{Name: "memoryBytesQuota", Value: float64(33554432), Labels: labels, EventTime: eventTime, Unit: ""})) + eventName := func(element interface{}) string { + return element.(stackdriver.Metric).Name + } + + Expect(metrics).To(MatchAllElements(eventName, Elements{ + 
"diskBytesQuota": MatchAllFields(Fields{"Name": Ignore(), "Value": Equal(float64(1073741824)), "Labels": Equal(labels), "EventTime": Ignore(), "Unit": Equal("")}), + "instanceIndex": MatchAllFields(Fields{"Name": Ignore(), "Value": Equal(float64(0)), "Labels": Equal(labels), "EventTime": Ignore(), "Unit": Equal("")}), + "cpuPercentage": MatchAllFields(Fields{"Name": Ignore(), "Value": Equal(float64(0.061651273460637)), "Labels": Equal(labels), "EventTime": Ignore(), "Unit": Equal("")}), + "diskBytes": MatchAllFields(Fields{"Name": Ignore(), "Value": Equal(float64(164634624)), "Labels": Equal(labels), "EventTime": Ignore(), "Unit": Equal("")}), + "memoryBytes": MatchAllFields(Fields{"Name": Ignore(), "Value": Equal(float64(16601088)), "Labels": Equal(labels), "EventTime": Ignore(), "Unit": Equal("")}), + "memoryBytesQuota": MatchAllFields(Fields{"Name": Ignore(), "Value": Equal(float64(33554432)), "Labels": Equal(labels), "EventTime": Ignore(), "Unit": Equal("")}), + })) }) It("creates total and delta metrics for CounterEvent", func() { @@ -155,26 +164,30 @@ var _ = Describe("MetricSink", func() { Expect(err).To(BeNil()) metrics := metricBuffer.PostedMetrics - Expect(metrics).To(ConsistOf( - stackdriver.Metric{ - Name: "counterName.delta", - Value: float64(654321), - Labels: labels, - EventTime: eventTime, - Unit: "", - }, - stackdriver.Metric{ - Name: "counterName.total", - Value: float64(123456), - Labels: labels, - EventTime: eventTime, - Unit: "", - }, - )) + + eventName := func(element interface{}) string { + return element.(stackdriver.Metric).Name + } + Expect(metrics).To(MatchAllElements(eventName, Elements{ + "counterName.delta": MatchAllFields(Fields{ + "Name": Ignore(), + "Value": Equal(float64(654321)), + "Labels": Equal(labels), + "EventTime": Ignore(), + "Unit": Equal(""), + }), + "counterName.total": MatchAllFields(Fields{ + "Name": Ignore(), + "Value": Equal(float64(123456)), + "Labels": Equal(labels), + "EventTime": Ignore(), + "Unit": Equal(""), + }), + })) }) It("returns error when envelope contains unhandled event type", func() { - eventType := events.Envelope_HttpStart + eventType := events.Envelope_HttpStartStop envelope := &events.Envelope{ EventType: &eventType, } diff --git a/src/stackdriver-nozzle/stackdriver/metric_client.go b/src/stackdriver-nozzle/stackdriver/metric_client.go index 98273310..25e927a8 100644 --- a/src/stackdriver-nozzle/stackdriver/metric_client.go +++ b/src/stackdriver-nozzle/stackdriver/metric_client.go @@ -21,6 +21,7 @@ import ( "cloud.google.com/go/monitoring/apiv3" "github.com/cloudfoundry-community/stackdriver-tools/src/stackdriver-nozzle/version" + "google.golang.org/api/iterator" "google.golang.org/api/option" metricpb "google.golang.org/genproto/googleapis/api/metric" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" @@ -65,7 +66,7 @@ func (m *metricClient) ListMetricDescriptors(request *monitoringpb.ListMetricDes descriptors := []*metricpb.MetricDescriptor{} for { metricDescriptor, err := it.Next() - if err == monitoring.Done { + if err == iterator.Done { break } if err != nil { diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/compute/metadata/metadata.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/compute/metadata/metadata.go index f9d2bef6..e708c031 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/compute/metadata/metadata.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/compute/metadata/metadata.go @@ -34,8 +34,6 @@ import ( "golang.org/x/net/context" 
"golang.org/x/net/context/ctxhttp" - - "cloud.google.com/go/internal" ) const ( @@ -48,6 +46,8 @@ const ( // This is variable name is not defined by any spec, as far as // I know; it was made up for the Go package. metadataHostEnv = "GCE_METADATA_HOST" + + userAgent = "gcloud-golang/0.1" ) type cachedValue struct { @@ -65,24 +65,20 @@ var ( var ( metaClient = &http.Client{ - Transport: &internal.Transport{ - Base: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - ResponseHeaderTimeout: 2 * time.Second, - }, + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, + ResponseHeaderTimeout: 2 * time.Second, }, } subscribeClient = &http.Client{ - Transport: &internal.Transport{ - Base: &http.Transport{ - Dial: (&net.Dialer{ - Timeout: 2 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - }, + Transport: &http.Transport{ + Dial: (&net.Dialer{ + Timeout: 2 * time.Second, + KeepAlive: 30 * time.Second, + }).Dial, }, } ) @@ -132,6 +128,7 @@ func getETag(client *http.Client, suffix string) (value, etag string, err error) url := "http://" + host + "/computeMetadata/v1/" + suffix req, _ := http.NewRequest("GET", url, nil) req.Header.Set("Metadata-Flavor", "Google") + req.Header.Set("User-Agent", userAgent) res, err := client.Do(req) if err != nil { return "", "", err @@ -202,7 +199,9 @@ func testOnGCE() bool { // Try two strategies in parallel. // See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194 go func() { - res, err := ctxhttp.Get(ctx, metaClient, "http://"+metadataIP) + req, _ := http.NewRequest("GET", "http://"+metadataIP, nil) + req.Header.Set("User-Agent", userAgent) + res, err := ctxhttp.Do(ctx, metaClient, req) if err != nil { resc <- false return diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/cloud.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/cloud.go deleted file mode 100644 index 8e0c8f8e..00000000 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/cloud.go +++ /dev/null @@ -1,64 +0,0 @@ -// Copyright 2014 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package internal provides support for the cloud packages. -// -// Users should not import this package directly. -package internal - -import ( - "fmt" - "net/http" -) - -const userAgent = "gcloud-golang/0.1" - -// Transport is an http.RoundTripper that appends Google Cloud client's -// user-agent to the original request's user-agent header. -type Transport struct { - // TODO(bradfitz): delete internal.Transport. It's too wrappy for what it does. - // Do User-Agent some other way. - - // Base is the actual http.RoundTripper - // requests will use. It must not be nil. - Base http.RoundTripper -} - -// RoundTrip appends a user-agent to the existing user-agent -// header and delegates the request to the base http.RoundTripper. 
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - req = cloneRequest(req) - ua := req.Header.Get("User-Agent") - if ua == "" { - ua = userAgent - } else { - ua = fmt.Sprintf("%s %s", ua, userAgent) - } - req.Header.Set("User-Agent", ua) - return t.Base.RoundTrip(req) -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return r2 -} diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/retry.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/retry.go new file mode 100644 index 00000000..f554fbf8 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/retry.go @@ -0,0 +1,56 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "fmt" + "time" + + gax "github.com/googleapis/gax-go" + + "golang.org/x/net/context" +) + +// Retry calls the supplied function f repeatedly according to the provided +// backoff parameters. It returns when one of the following occurs: +// When f's first return value is true, Retry immediately returns with f's second +// return value. +// When the provided context is done, Retry returns with an error that +// includes both ctx.Error() and the last error returned by f. +func Retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error)) error { + return retry(ctx, bo, f, gax.Sleep) +} + +func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error), + sleep func(context.Context, time.Duration) error) error { + var lastErr error + for { + stop, err := f() + if stop { + return err + } + // Remember the last "real" error from f. 
+ if err != nil && err != context.Canceled && err != context.DeadlineExceeded { + lastErr = err + } + p := bo.Pause() + if cerr := sleep(ctx, p); cerr != nil { + if lastErr != nil { + return fmt.Errorf("%v; last function err: %v", cerr, lastErr) + } + return cerr + } + } +} diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/version/update_version.sh b/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/version/update_version.sh new file mode 100755 index 00000000..fecf1f03 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/version/update_version.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +today=$(date +%Y%m%d) + +sed -i -r -e 's/const Repo = "([0-9]{8})"/const Repo = "'$today'"/' $GOFILE + diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/version/version.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/version/version.go new file mode 100644 index 00000000..5eb06bac --- /dev/null +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/internal/version/version.go @@ -0,0 +1,71 @@ +// Copyright 2016 Google Inc. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:generate ./update_version.sh + +// Package version contains version information for Google Cloud Client +// Libraries for Go, as reported in request headers. +package version + +import ( + "runtime" + "strings" + "unicode" +) + +// Repo is the current version of the client libraries in this +// repo. It should be a date in YYYYMMDD format. +const Repo = "20170621" + +// Go returns the Go runtime version. The returned string +// has no whitespace. +func Go() string { + return goVersion +} + +var goVersion = goVer(runtime.Version()) + +const develPrefix = "devel +" + +func goVer(s string) string { + if strings.HasPrefix(s, develPrefix) { + s = s[len(develPrefix):] + if p := strings.IndexFunc(s, unicode.IsSpace); p >= 0 { + s = s[:p] + } + return s + } + + if strings.HasPrefix(s, "go1") { + s = s[2:] + var prerelease string + if p := strings.IndexFunc(s, notSemverRune); p >= 0 { + s, prerelease = s[:p], s[p:] + } + if strings.HasSuffix(s, ".") { + s += "0" + } else if strings.Count(s, ".") < 2 { + s += ".0" + } + if prerelease != "" { + s += "-" + prerelease + } + return s + } + return "" +} + +func notSemverRune(r rune) bool { + return strings.IndexRune("0123456789.", r) < 0 +} diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/config_client.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/config_client.go index 29438340..3df8653c 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/config_client.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/config_client.go @@ -1,10 +1,10 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -17,27 +17,21 @@ package logging import ( - "fmt" "math" - "runtime" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" + "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) -var ( - configParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") - configSinkPathTemplate = gax.MustCompilePathTemplate("projects/{project}/sinks/{sink}") -) - -// ConfigCallOptions contains the retry settings for each method of this client. +// ConfigCallOptions contains the retry settings for each method of ConfigClient. type ConfigCallOptions struct { ListSinks []gax.CallOption GetSink []gax.CallOption @@ -49,13 +43,7 @@ type ConfigCallOptions struct { func defaultConfigClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("logging.googleapis.com:443"), - option.WithScopes( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read", - "https://www.googleapis.com/auth/logging.write", - ), + option.WithScopes(DefaultAuthScopes()...), } } @@ -65,6 +53,7 @@ func defaultConfigCallOptions() *ConfigCallOptions { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, + codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, @@ -74,7 +63,6 @@ func defaultConfigCallOptions() *ConfigCallOptions { }), }, } - return &ConfigCallOptions{ ListSinks: retry[[2]string{"default", "idempotent"}], GetSink: retry[[2]string{"default", "idempotent"}], @@ -84,25 +72,25 @@ func defaultConfigCallOptions() *ConfigCallOptions { } } -// ConfigClient is a client for interacting with ConfigServiceV2. +// ConfigClient is a client for interacting with Stackdriver Logging API. type ConfigClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. - client loggingpb.ConfigServiceV2Client + configClient loggingpb.ConfigServiceV2Client // The call options for this service. CallOptions *ConfigCallOptions // The metadata to be sent with each request. - metadata map[string][]string + xGoogHeader []string } -// NewConfigClient creates a new config service client. +// NewConfigClient creates a new config service v2 client. // -// Service for configuring sinks used to export log entries outside Stackdriver -// Logging. +// Service for configuring sinks used to export log entries outside of +// Stackdriver Logging. func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigClient, error) { conn, err := transport.DialGRPC(ctx, append(defaultConfigClientOptions(), opts...)...) 
if err != nil { @@ -110,10 +98,11 @@ func NewConfigClient(ctx context.Context, opts ...option.ClientOption) (*ConfigC } c := &ConfigClient{ conn: conn, - client: loggingpb.NewConfigServiceV2Client(conn), CallOptions: defaultConfigCallOptions(), + + configClient: loggingpb.NewConfigServiceV2Client(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo() return c, nil } @@ -131,197 +120,175 @@ func (c *ConfigClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *ConfigClient) SetGoogleClientInfo(name, version string) { - c.metadata = map[string][]string{ - "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, - } +func (c *ConfigClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } -// ParentPath returns the path for the parent resource. -func ConfigParentPath(project string) string { - path, err := configParentPathTemplate.Render(map[string]string{ - "project": project, - }) - if err != nil { - panic(err) - } - return path +// ConfigProjectPath returns the path for the project resource. +func ConfigProjectPath(project string) string { + return "" + + "projects/" + + project + + "" } -// SinkPath returns the path for the sink resource. -func ConfigSinkPath(project string, sink string) string { - path, err := configSinkPathTemplate.Render(map[string]string{ - "project": project, - "sink": sink, - }) - if err != nil { - panic(err) - } - return path +// ConfigSinkPath returns the path for the sink resource. +func ConfigSinkPath(project, sink string) string { + return "" + + "projects/" + + project + + "/sinks/" + + sink + + "" } // ListSinks lists sinks. -func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest) *LogSinkIterator { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *ConfigClient) ListSinks(ctx context.Context, req *loggingpb.ListSinksRequest, opts ...gax.CallOption) *LogSinkIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListSinks[0:len(c.CallOptions.ListSinks):len(c.CallOptions.ListSinks)], opts...) it := &LogSinkIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogSink, string, error) { var resp *loggingpb.ListSinksResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListSinks(ctx, req) + resp, err = c.configClient.ListSinks(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListSinks...) + }, opts...) 
if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.Sinks, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.Sinks - return nil + it.items = append(it.items, items...) + return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetSink gets a sink. -func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest) (*loggingpb.LogSink, error) { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *ConfigClient) GetSink(ctx context.Context, req *loggingpb.GetSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetSink[0:len(c.CallOptions.GetSink):len(c.CallOptions.GetSink)], opts...) var resp *loggingpb.LogSink - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.GetSink(ctx, req) + resp, err = c.configClient.GetSink(ctx, req, settings.GRPC...) return err - }, c.CallOptions.GetSink...) + }, opts...) if err != nil { return nil, err } return resp, nil } -// CreateSink creates a sink. -func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest) (*loggingpb.LogSink, error) { - ctx = metadata.NewContext(ctx, c.metadata) +// CreateSink creates a sink that exports specified log entries to a destination. The +// export of newly-ingested log entries begins immediately, unless the current +// time is outside the sink's start and end times or the sink's +// writer_identity is not permitted to write to the destination. A sink can +// export log entries only from the resource owning the sink. +func (c *ConfigClient) CreateSink(ctx context.Context, req *loggingpb.CreateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateSink[0:len(c.CallOptions.CreateSink):len(c.CallOptions.CreateSink)], opts...) var resp *loggingpb.LogSink - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.CreateSink(ctx, req) + resp, err = c.configClient.CreateSink(ctx, req, settings.GRPC...) return err - }, c.CallOptions.CreateSink...) + }, opts...) if err != nil { return nil, err } return resp, nil } -// UpdateSink creates or updates a sink. -func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest) (*loggingpb.LogSink, error) { - ctx = metadata.NewContext(ctx, c.metadata) +// UpdateSink updates a sink. If the named sink doesn't exist, then this method is +// identical to +// sinks.create (at /logging/docs/api/reference/rest/v2/projects.sinks/create). +// If the named sink does exist, then this method replaces the following +// fields in the existing sink with values from the new sink: destination, +// filter, output_version_format, start_time, and end_time. +// The updated filter might also have a new writer_identity; see the +// unique_writer_identity field. 
+func (c *ConfigClient) UpdateSink(ctx context.Context, req *loggingpb.UpdateSinkRequest, opts ...gax.CallOption) (*loggingpb.LogSink, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.UpdateSink[0:len(c.CallOptions.UpdateSink):len(c.CallOptions.UpdateSink)], opts...) var resp *loggingpb.LogSink - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.UpdateSink(ctx, req) + resp, err = c.configClient.UpdateSink(ctx, req, settings.GRPC...) return err - }, c.CallOptions.UpdateSink...) + }, opts...) if err != nil { return nil, err } return resp, nil } -// DeleteSink deletes a sink. -func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest) error { - ctx = metadata.NewContext(ctx, c.metadata) - err := gax.Invoke(ctx, func(ctx context.Context) error { +// DeleteSink deletes a sink. If the sink has a unique writer_identity, then that +// service account is also deleted. +func (c *ConfigClient) DeleteSink(ctx context.Context, req *loggingpb.DeleteSinkRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteSink[0:len(c.CallOptions.DeleteSink):len(c.CallOptions.DeleteSink)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.client.DeleteSink(ctx, req) + _, err = c.configClient.DeleteSink(ctx, req, settings.GRPC...) return err - }, c.CallOptions.DeleteSink...) + }, opts...) return err } // LogSinkIterator manages a stream of *loggingpb.LogSink. type LogSinkIterator struct { - // The current page data. - items []*loggingpb.LogSink - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error -} - -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *LogSinkIterator) NextPage() ([]*loggingpb.LogSink, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - if it.atLastPage { - return it.items, Done - } - return it.items, nil -} + items []*loggingpb.LogSink + pageInfo *iterator.PageInfo + nextFunc func() error -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. 
-func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 - } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogSink, nextPageToken string, err error) } -// PageSize returns the page size for all subsequent calls to NextPage. -func (it *LogSinkIterator) PageSize() int { - return int(it.pageSize) +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogSinkIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *LogSinkIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogSinkIterator) Next() (*loggingpb.LogSink, error) { + var item *loggingpb.LogSink + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. -func (it *LogSinkIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *LogSinkIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *LogSinkIterator) NextPageToken() string { - return it.nextPageToken +func (it *LogSinkIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/doc.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/doc.go index 30381824..77c43c7d 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/doc.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/doc.go @@ -1,10 +1,10 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,8 +15,33 @@ // AUTO-GENERATED CODE. DO NOT EDIT. // Package logging is an experimental, auto-generated package for the -// logging API. +// Stackdriver Logging API. 
// -// The Google Cloud Logging API lets you write log entries and manage your +// The Stackdriver Logging API lets you write log entries and manage your // logs, log sinks and logs-based metrics. +// +// Use the client at cloud.google.com/go/logging in preference to this. package logging // import "cloud.google.com/go/logging/apiv2" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val []string) context.Context { + md, _ := metadata.FromOutgoingContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = val + return metadata.NewOutgoingContext(ctx, md) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/cloud-platform.read-only", + "https://www.googleapis.com/auth/logging.admin", + "https://www.googleapis.com/auth/logging.read", + "https://www.googleapis.com/auth/logging.write", + } +} diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/logging.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/logging.go deleted file mode 100644 index fb979476..00000000 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/logging.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// AUTO-GENERATED CODE. DO NOT EDIT. - -package logging - -import "errors" - -const ( - gapicNameVersion = "gapic/0.1.0" -) - -// Done is returned by iterators on successful completion. -var Done = errors.New("iterator done") diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/logging_client.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/logging_client.go index daa838d0..4f64ff0c 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/logging_client.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/logging_client.go @@ -1,10 +1,10 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -17,45 +17,34 @@ package logging import ( - "fmt" "math" - "runtime" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" + "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) -var ( - loggingParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") - loggingLogPathTemplate = gax.MustCompilePathTemplate("projects/{project}/logs/{log}") -) - -// CallOptions contains the retry settings for each method of this client. +// CallOptions contains the retry settings for each method of Client. type CallOptions struct { DeleteLog []gax.CallOption WriteLogEntries []gax.CallOption ListLogEntries []gax.CallOption ListMonitoredResourceDescriptors []gax.CallOption + ListLogs []gax.CallOption } func defaultClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("logging.googleapis.com:443"), - option.WithScopes( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read", - "https://www.googleapis.com/auth/logging.write", - ), + option.WithScopes(DefaultAuthScopes()...), } } @@ -65,6 +54,7 @@ func defaultCallOptions() *CallOptions { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, + codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, @@ -77,6 +67,7 @@ func defaultCallOptions() *CallOptions { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, + codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, @@ -86,16 +77,16 @@ func defaultCallOptions() *CallOptions { }), }, } - return &CallOptions{ DeleteLog: retry[[2]string{"default", "idempotent"}], WriteLogEntries: retry[[2]string{"default", "non_idempotent"}], ListLogEntries: retry[[2]string{"list", "idempotent"}], ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], + ListLogs: retry[[2]string{"default", "idempotent"}], } } -// Client is a client for interacting with LoggingServiceV2. +// Client is a client for interacting with Stackdriver Logging API. type Client struct { // The connection to the service. conn *grpc.ClientConn @@ -107,10 +98,10 @@ type Client struct { CallOptions *CallOptions // The metadata to be sent with each request. - metadata map[string][]string + xGoogHeader []string } -// NewClient creates a new logging service client. +// NewClient creates a new logging service v2 client. // // Service for ingesting and querying logs. 
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) { @@ -120,10 +111,11 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error } c := &Client{ conn: conn, - client: loggingpb.NewLoggingServiceV2Client(conn), CallOptions: defaultCallOptions(), + + client: loggingpb.NewLoggingServiceV2Client(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo() return c, nil } @@ -141,281 +133,292 @@ func (c *Client) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *Client) SetGoogleClientInfo(name, version string) { - c.metadata = map[string][]string{ - "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, - } +func (c *Client) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } -// ParentPath returns the path for the parent resource. -func LoggingParentPath(project string) string { - path, err := loggingParentPathTemplate.Render(map[string]string{ - "project": project, - }) - if err != nil { - panic(err) - } - return path +// ProjectPath returns the path for the project resource. +func ProjectPath(project string) string { + return "" + + "projects/" + + project + + "" } // LogPath returns the path for the log resource. -func LoggingLogPath(project string, log string) string { - path, err := loggingLogPathTemplate.Render(map[string]string{ - "project": project, - "log": log, - }) - if err != nil { - panic(err) - } - return path +func LogPath(project, log string) string { + return "" + + "projects/" + + project + + "/logs/" + + log + + "" } -// DeleteLog deletes a log and all its log entries. -// The log will reappear if it receives new entries. -func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest) error { - ctx = metadata.NewContext(ctx, c.metadata) - err := gax.Invoke(ctx, func(ctx context.Context) error { +// DeleteLog deletes all the log entries in a log. +// The log reappears if it receives new entries. +// Log entries written shortly before the delete operation might not be +// deleted. +func (c *Client) DeleteLog(ctx context.Context, req *loggingpb.DeleteLogRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteLog[0:len(c.CallOptions.DeleteLog):len(c.CallOptions.DeleteLog)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.client.DeleteLog(ctx, req) + _, err = c.client.DeleteLog(ctx, req, settings.GRPC...) return err - }, c.CallOptions.DeleteLog...) + }, opts...) return err } -// WriteLogEntries writes log entries to Stackdriver Logging. All log entries are -// written by this method. -func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest) (*loggingpb.WriteLogEntriesResponse, error) { - ctx = metadata.NewContext(ctx, c.metadata) +// WriteLogEntries writes log entries to Stackdriver Logging. 
+func (c *Client) WriteLogEntries(ctx context.Context, req *loggingpb.WriteLogEntriesRequest, opts ...gax.CallOption) (*loggingpb.WriteLogEntriesResponse, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.WriteLogEntries[0:len(c.CallOptions.WriteLogEntries):len(c.CallOptions.WriteLogEntries)], opts...) var resp *loggingpb.WriteLogEntriesResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.WriteLogEntries(ctx, req) + resp, err = c.client.WriteLogEntries(ctx, req, settings.GRPC...) return err - }, c.CallOptions.WriteLogEntries...) + }, opts...) if err != nil { return nil, err } return resp, nil } -// ListLogEntries lists log entries. Use this method to retrieve log entries from Cloud -// Logging. For ways to export log entries, see -// [Exporting Logs](/logging/docs/export). -func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest) *LogEntryIterator { - ctx = metadata.NewContext(ctx, c.metadata) +// ListLogEntries lists log entries. Use this method to retrieve log entries from +// Stackdriver Logging. For ways to export log entries, see +// Exporting Logs (at /logging/docs/export). +func (c *Client) ListLogEntries(ctx context.Context, req *loggingpb.ListLogEntriesRequest, opts ...gax.CallOption) *LogEntryIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListLogEntries[0:len(c.CallOptions.ListLogEntries):len(c.CallOptions.ListLogEntries)], opts...) it := &LogEntryIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogEntry, string, error) { var resp *loggingpb.ListLogEntriesResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListLogEntries(ctx, req) + resp, err = c.client.ListLogEntries(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListLogEntries...) + }, opts...) if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.Entries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.Entries - return nil + it.items = append(it.items, items...) + return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } -// ListMonitoredResourceDescriptors lists the monitored resource descriptors used by Stackdriver Logging. -func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { - ctx = metadata.NewContext(ctx, c.metadata) +// ListMonitoredResourceDescriptors lists the descriptors for monitored resource types used by Stackdriver +// Logging. 
+func (c *Client) ListMonitoredResourceDescriptors(ctx context.Context, req *loggingpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) it := &MonitoredResourceDescriptorIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { var resp *loggingpb.ListMonitoredResourceDescriptorsResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req) + resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListMonitoredResourceDescriptors...) + }, opts...) if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.ResourceDescriptors - return nil + it.items = append(it.items, items...) + return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } -// LogEntryIterator manages a stream of *loggingpb.LogEntry. -type LogEntryIterator struct { - // The current page data. - items []*loggingpb.LogEntry - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error -} - -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *LogEntryIterator) NextPage() ([]*loggingpb.LogEntry, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err +// ListLogs lists the logs in projects, organizations, folders, or billing accounts. +// Only logs that have entries are listed. +func (c *Client) ListLogs(ctx context.Context, req *loggingpb.ListLogsRequest, opts ...gax.CallOption) *StringIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListLogs[0:len(c.CallOptions.ListLogs):len(c.CallOptions.ListLogs)], opts...) 
+ it := &StringIterator{} + it.InternalFetch = func(pageSize int, pageToken string) ([]string, string, error) { + var resp *loggingpb.ListLogsResponse + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { + var err error + resp, err = c.client.ListLogs(ctx, req, settings.GRPC...) + return err + }, opts...) + if err != nil { + return nil, "", err + } + return resp.LogNames, resp.NextPageToken, nil } - if it.atLastPage { - return it.items, Done + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err + } + it.items = append(it.items, items...) + return nextPageToken, nil } - return it.items, nil + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) + return it } -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. -func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 - } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil +// LogEntryIterator manages a stream of *loggingpb.LogEntry. +type LogEntryIterator struct { + items []*loggingpb.LogEntry + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogEntry, nextPageToken string, err error) } -// PageSize returns the page size for all subsequent calls to NextPage. -func (it *LogEntryIterator) PageSize() int { - return int(it.pageSize) +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogEntryIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *LogEntryIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogEntryIterator) Next() (*loggingpb.LogEntry, error) { + var item *loggingpb.LogEntry + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. 
-func (it *LogEntryIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *LogEntryIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *LogEntryIterator) NextPageToken() string { - return it.nextPageToken +func (it *LogEntryIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } // MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. type MonitoredResourceDescriptorIterator struct { - // The current page data. - items []*monitoredrespb.MonitoredResourceDescriptor - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) } -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *MonitoredResourceDescriptorIterator) NextPage() ([]*monitoredrespb.MonitoredResourceDescriptor, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - if it.atLastPage { - return it.items, Done - } - return it.items, nil +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. 
func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil + item = it.items[0] + it.items = it.items[1:] + return item, nil +} + +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) +} + +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b +} + +// StringIterator manages a stream of string. +type StringIterator struct { + items []string + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []string, nextPageToken string, err error) } -// PageSize returns the page size for all subsequent calls to NextPage. -func (it *MonitoredResourceDescriptorIterator) PageSize() int { - return int(it.pageSize) +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *StringIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *MonitoredResourceDescriptorIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *StringIterator) Next() (string, error) { + var item string + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. -func (it *MonitoredResourceDescriptorIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *StringIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *MonitoredResourceDescriptorIterator) NextPageToken() string { - return it.nextPageToken +func (it *StringIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go index 9c65913c..e4cc57f3 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/apiv2/metrics_client.go @@ -1,10 +1,10 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2017, Google Inc. All rights reserved. 
// // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -17,27 +17,21 @@ package logging import ( - "fmt" "math" - "runtime" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" + "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" loggingpb "google.golang.org/genproto/googleapis/logging/v2" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) -var ( - metricsParentPathTemplate = gax.MustCompilePathTemplate("projects/{project}") - metricsMetricPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metrics/{metric}") -) - -// MetricsCallOptions contains the retry settings for each method of this client. +// MetricsCallOptions contains the retry settings for each method of MetricsClient. type MetricsCallOptions struct { ListLogMetrics []gax.CallOption GetLogMetric []gax.CallOption @@ -49,13 +43,7 @@ type MetricsCallOptions struct { func defaultMetricsClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("logging.googleapis.com:443"), - option.WithScopes( - "https://www.googleapis.com/auth/cloud-platform", - "https://www.googleapis.com/auth/cloud-platform.read-only", - "https://www.googleapis.com/auth/logging.admin", - "https://www.googleapis.com/auth/logging.read", - "https://www.googleapis.com/auth/logging.write", - ), + option.WithScopes(DefaultAuthScopes()...), } } @@ -65,6 +53,7 @@ func defaultMetricsCallOptions() *MetricsCallOptions { gax.WithRetry(func() gax.Retryer { return gax.OnCodes([]codes.Code{ codes.DeadlineExceeded, + codes.Internal, codes.Unavailable, }, gax.Backoff{ Initial: 100 * time.Millisecond, @@ -74,7 +63,6 @@ func defaultMetricsCallOptions() *MetricsCallOptions { }), }, } - return &MetricsCallOptions{ ListLogMetrics: retry[[2]string{"default", "idempotent"}], GetLogMetric: retry[[2]string{"default", "idempotent"}], @@ -84,22 +72,22 @@ func defaultMetricsCallOptions() *MetricsCallOptions { } } -// MetricsClient is a client for interacting with MetricsServiceV2. +// MetricsClient is a client for interacting with Stackdriver Logging API. type MetricsClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. - client loggingpb.MetricsServiceV2Client + metricsClient loggingpb.MetricsServiceV2Client // The call options for this service. CallOptions *MetricsCallOptions // The metadata to be sent with each request. - metadata map[string][]string + xGoogHeader []string } -// NewMetricsClient creates a new metrics service client. +// NewMetricsClient creates a new metrics service v2 client. // // Service for configuring logs-based metrics. 
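Note: each MetricsClient method below now takes a trailing opts ...gax.CallOption, so retry behavior can be overridden per call instead of only through CallOptions. A hedged sketch of that usage (the metric resource name is a placeholder and the retry values are illustrative only):

package example

import (
	"time"

	vkit "cloud.google.com/go/logging/apiv2"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	loggingpb "google.golang.org/genproto/googleapis/logging/v2"
	"google.golang.org/grpc/codes"
)

// getMetricWithRetry fetches one logs-based metric, retrying only on Unavailable
// by passing a per-call gax option.
func getMetricWithRetry(ctx context.Context, name string) (*loggingpb.LogMetric, error) {
	c, err := vkit.NewMetricsClient(ctx)
	if err != nil {
		return nil, err
	}
	defer c.Close()
	retry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        time.Second,
			Multiplier: 1.3,
		})
	})
	return c.GetLogMetric(ctx, &loggingpb.GetLogMetricRequest{MetricName: name}, retry)
}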
func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*MetricsClient, error) { @@ -109,10 +97,11 @@ func NewMetricsClient(ctx context.Context, opts ...option.ClientOption) (*Metric } c := &MetricsClient{ conn: conn, - client: loggingpb.NewMetricsServiceV2Client(conn), CallOptions: defaultMetricsCallOptions(), + + metricsClient: loggingpb.NewMetricsServiceV2Client(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.SetGoogleClientInfo() return c, nil } @@ -130,70 +119,75 @@ func (c *MetricsClient) Close() error { // SetGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *MetricsClient) SetGoogleClientInfo(name, version string) { - c.metadata = map[string][]string{ - "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, - } +func (c *MetricsClient) SetGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } -// ParentPath returns the path for the parent resource. -func MetricsParentPath(project string) string { - path, err := metricsParentPathTemplate.Render(map[string]string{ - "project": project, - }) - if err != nil { - panic(err) - } - return path +// MetricsProjectPath returns the path for the project resource. +func MetricsProjectPath(project string) string { + return "" + + "projects/" + + project + + "" } -// MetricPath returns the path for the metric resource. -func MetricsMetricPath(project string, metric string) string { - path, err := metricsMetricPathTemplate.Render(map[string]string{ - "project": project, - "metric": metric, - }) - if err != nil { - panic(err) - } - return path +// MetricsMetricPath returns the path for the metric resource. +func MetricsMetricPath(project, metric string) string { + return "" + + "projects/" + + project + + "/metrics/" + + metric + + "" } // ListLogMetrics lists logs-based metrics. -func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest) *LogMetricIterator { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricsClient) ListLogMetrics(ctx context.Context, req *loggingpb.ListLogMetricsRequest, opts ...gax.CallOption) *LogMetricIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListLogMetrics[0:len(c.CallOptions.ListLogMetrics):len(c.CallOptions.ListLogMetrics)], opts...) it := &LogMetricIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*loggingpb.LogMetric, string, error) { var resp *loggingpb.ListLogMetricsResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListLogMetrics(ctx, req) + resp, err = c.metricsClient.ListLogMetrics(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListLogMetrics...) + }, opts...) 
if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.Metrics, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.Metrics - return nil + it.items = append(it.items, items...) + return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetLogMetric gets a logs-based metric. -func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest) (*loggingpb.LogMetric, error) { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetLogMetric[0:len(c.CallOptions.GetLogMetric):len(c.CallOptions.GetLogMetric)], opts...) var resp *loggingpb.LogMetric - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.GetLogMetric(ctx, req) + resp, err = c.metricsClient.GetLogMetric(ctx, req, settings.GRPC...) return err - }, c.CallOptions.GetLogMetric...) + }, opts...) if err != nil { return nil, err } @@ -201,14 +195,15 @@ func (c *MetricsClient) GetLogMetric(ctx context.Context, req *loggingpb.GetLogM } // CreateLogMetric creates a logs-based metric. -func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest) (*loggingpb.LogMetric, error) { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.CreateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateLogMetric[0:len(c.CallOptions.CreateLogMetric):len(c.CallOptions.CreateLogMetric)], opts...) var resp *loggingpb.LogMetric - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.CreateLogMetric(ctx, req) + resp, err = c.metricsClient.CreateLogMetric(ctx, req, settings.GRPC...) return err - }, c.CallOptions.CreateLogMetric...) + }, opts...) if err != nil { return nil, err } @@ -216,14 +211,15 @@ func (c *MetricsClient) CreateLogMetric(ctx context.Context, req *loggingpb.Crea } // UpdateLogMetric creates or updates a logs-based metric. -func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest) (*loggingpb.LogMetric, error) { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.UpdateLogMetricRequest, opts ...gax.CallOption) (*loggingpb.LogMetric, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.UpdateLogMetric[0:len(c.CallOptions.UpdateLogMetric):len(c.CallOptions.UpdateLogMetric)], opts...) var resp *loggingpb.LogMetric - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.UpdateLogMetric(ctx, req) + resp, err = c.metricsClient.UpdateLogMetric(ctx, req, settings.GRPC...) 
return err - }, c.CallOptions.UpdateLogMetric...) + }, opts...) if err != nil { return nil, err } @@ -231,96 +227,55 @@ func (c *MetricsClient) UpdateLogMetric(ctx context.Context, req *loggingpb.Upda } // DeleteLogMetric deletes a logs-based metric. -func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest) error { - ctx = metadata.NewContext(ctx, c.metadata) - err := gax.Invoke(ctx, func(ctx context.Context) error { +func (c *MetricsClient) DeleteLogMetric(ctx context.Context, req *loggingpb.DeleteLogMetricRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteLogMetric[0:len(c.CallOptions.DeleteLogMetric):len(c.CallOptions.DeleteLogMetric)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.client.DeleteLogMetric(ctx, req) + _, err = c.metricsClient.DeleteLogMetric(ctx, req, settings.GRPC...) return err - }, c.CallOptions.DeleteLogMetric...) + }, opts...) return err } // LogMetricIterator manages a stream of *loggingpb.LogMetric. type LogMetricIterator struct { - // The current page data. - items []*loggingpb.LogMetric - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error -} - -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *LogMetricIterator) NextPage() ([]*loggingpb.LogMetric, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - if it.atLastPage { - return it.items, Done - } - return it.items, nil -} + items []*loggingpb.LogMetric + pageInfo *iterator.PageInfo + nextFunc func() error -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. -func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 - } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. 
+ InternalFetch func(pageSize int, pageToken string) (results []*loggingpb.LogMetric, nextPageToken string, err error) } -// PageSize returns the page size for all subsequent calls to NextPage. -func (it *LogMetricIterator) PageSize() int { - return int(it.pageSize) +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *LogMetricIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *LogMetricIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *LogMetricIterator) Next() (*loggingpb.LogMetric, error) { + var item *loggingpb.LogMetric + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. -func (it *LogMetricIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *LogMetricIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *LogMetricIterator) NextPageToken() string { - return it.nextPageToken +func (it *LogMetricIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/doc.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/doc.go index 6da3adf1..32ca717f 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/doc.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/doc.go @@ -20,7 +20,8 @@ see package cloud.google.com/go/logging/logadmin. This client uses Logging API v2. See https://cloud.google.com/logging/docs/api/v2/ for an introduction to the API. -This package is experimental and subject to API changes. + +Note: This package is in beta. Some backwards-incompatible changes may occur. 
Creating a Client diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/internal/common.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/internal/common.go index 7d8ece09..38cfbb5f 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/internal/common.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/internal/common.go @@ -28,3 +28,12 @@ func LogPath(parent, logID string) string { logID = strings.Replace(logID, "/", "%2F", -1) return fmt.Sprintf("%s/logs/%s", parent, logID) } + +func LogIDFromPath(parent, path string) string { + start := len(parent) + len("/logs/") + if len(path) < start { + return "" + } + logID := path[start:] + return strings.Replace(logID, "%2F", "/", -1) +} diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/logging.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/logging.go index 48e0bbe1..5738adcf 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/logging.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/logging/logging.go @@ -36,7 +36,8 @@ import ( "sync" "time" - "cloud.google.com/go/internal/bundler" + "cloud.google.com/go/compute/metadata" + "cloud.google.com/go/internal/version" vkit "cloud.google.com/go/logging/apiv2" "cloud.google.com/go/logging/internal" "github.com/golang/protobuf/proto" @@ -45,6 +46,7 @@ import ( tspb "github.com/golang/protobuf/ptypes/timestamp" "golang.org/x/net/context" "google.golang.org/api/option" + "google.golang.org/api/support/bundler" mrpb "google.golang.org/genproto/googleapis/api/monitoredres" logtypepb "google.golang.org/genproto/googleapis/logging/type" logpb "google.golang.org/genproto/googleapis/logging/v2" @@ -70,7 +72,7 @@ const ( DefaultDelayThreshold = time.Second // DefaultEntryCountThreshold is the default value for the EntryCountThreshold LoggerOption. - DefaultEntryCountThreshold = 10 + DefaultEntryCountThreshold = 1000 // DefaultEntryByteThreshold is the default value for the EntryByteThreshold LoggerOption. DefaultEntryByteThreshold = 1 << 20 // 1MiB @@ -84,7 +86,11 @@ var now = time.Now // ErrOverflow signals that the number of buffered entries for a Logger // exceeds its BufferLimit. -var ErrOverflow = errors.New("logging: log entry overflowed buffer limits") +var ErrOverflow = bundler.ErrOverflow + +// ErrOversizedEntry signals that an entry's size exceeds the maximum number of +// bytes that will be sent in a single call to the logging service. +var ErrOversizedEntry = bundler.ErrOversizedItem // Client is a Logging client. A Client is associated with a single Cloud project. type Client struct { @@ -95,6 +101,10 @@ type Client struct { loggers sync.WaitGroup // so we can wait for loggers to close closed bool + mu sync.Mutex + nErrs int // number of errors we saw + lastErr error // last error we saw + // OnError is called when an error occurs in a call to Log or Flush. The // error may be due to an invalid Entry, an overflow because BufferLimit // was reached (in which case the error will be ErrOverflow) or an error @@ -125,7 +135,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio if err != nil { return nil, err } - c.SetGoogleClientInfo("logging", internal.Version) + c.SetGoogleClientInfo("gccl", version.Repo) client := &Client{ client: c, projectID: projectID, @@ -170,18 +180,43 @@ func init() { // log entry "ping" to a log named "ping". 
func (c *Client) Ping(ctx context.Context) error { ent := &logpb.LogEntry{ - Payload: &logpb.LogEntry_TextPayload{"ping"}, + Payload: &logpb.LogEntry_TextPayload{TextPayload: "ping"}, Timestamp: unixZeroTimestamp, // Identical timestamps and insert IDs are both InsertId: "ping", // necessary for the service to dedup these entries. } _, err := c.client.WriteLogEntries(ctx, &logpb.WriteLogEntriesRequest{ LogName: internal.LogPath(c.parent(), "ping"), - Resource: &mrpb.MonitoredResource{Type: "global"}, + Resource: globalResource(c.projectID), Entries: []*logpb.LogEntry{ent}, }) return err } +// error puts the error on the client's error channel +// without blocking, and records summary error info. +func (c *Client) error(err error) { + select { + case c.errc <- err: + default: + } + c.mu.Lock() + c.lastErr = err + c.nErrs++ + c.mu.Unlock() +} + +func (c *Client) extractErrorInfo() error { + var err error + c.mu.Lock() + if c.lastErr != nil { + err = fmt.Errorf("saw %d errors; last: %v", c.nErrs, c.lastErr) + c.nErrs = 0 + c.lastErr = nil + } + c.mu.Unlock() + return err +} + // A Logger is used to write log messages to a single log. It can be configured // with a log ID, common monitored resource, and a set of common labels. type Logger struct { @@ -201,14 +236,58 @@ type LoggerOption interface { } // CommonResource sets the monitored resource associated with all log entries -// written from a Logger. If not provided, a resource of type "global" is used. -// This value can be overridden by setting an Entry's Resource field. +// written from a Logger. If not provided, the resource is automatically +// detected based on the running environment. This value can be overridden +// per-entry by setting an Entry's Resource field. func CommonResource(r *mrpb.MonitoredResource) LoggerOption { return commonResource{r} } type commonResource struct{ *mrpb.MonitoredResource } func (r commonResource) set(l *Logger) { l.commonResource = r.MonitoredResource } +var detectedResource struct { + pb *mrpb.MonitoredResource + once sync.Once +} + +func detectResource() *mrpb.MonitoredResource { + detectedResource.once.Do(func() { + if !metadata.OnGCE() { + return + } + projectID, err := metadata.ProjectID() + if err != nil { + return + } + id, err := metadata.InstanceID() + if err != nil { + return + } + zone, err := metadata.Zone() + if err != nil { + return + } + detectedResource.pb = &mrpb.MonitoredResource{ + Type: "gce_instance", + Labels: map[string]string{ + "project_id": projectID, + "instance_id": id, + "zone": zone, + }, + } + }) + return detectedResource.pb +} + +func globalResource(projectID string) *mrpb.MonitoredResource { + return &mrpb.MonitoredResource{ + Type: "global", + Labels: map[string]string{ + "project_id": projectID, + }, + } +} + // CommonLabels are labels that apply to all log entries written from a Logger, // so that you don't have to repeat them in each log entry's Labels field. If // any of the log entries contains a (key, value) with the same key that is in @@ -256,10 +335,10 @@ type entryByteThreshold int func (e entryByteThreshold) set(l *Logger) { l.bundler.BundleByteThreshold = int(e) } // EntryByteLimit is the maximum number of bytes of entries that will be sent -// in a single call to the logging service. This option limits the size of a -// single RPC payload, to account for network or service issues with large -// RPCs. If EntryByteLimit is smaller than EntryByteThreshold, the latter has -// no effect. +// in a single call to the logging service. 
ErrOversizedEntry is returned if an +// entry exceeds EntryByteLimit. This option limits the size of a single RPC +// payload, to account for network or service issues with large RPCs. If +// EntryByteLimit is smaller than EntryByteThreshold, the latter has no effect. // The default is zero, meaning there is no limit. func EntryByteLimit(n int) LoggerOption { return entryByteLimit(n) } @@ -287,10 +366,14 @@ func (b bufferedByteLimit) set(l *Logger) { l.bundler.BufferedByteLimit = int(b) // characters: [A-Za-z0-9]; and punctuation characters: forward-slash, // underscore, hyphen, and period. func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { + r := detectResource() + if r == nil { + r = globalResource(c.projectID) + } l := &Logger{ client: c, logName: internal.LogPath(c.parent(), logID), - commonResource: &mrpb.MonitoredResource{Type: "global"}, + commonResource: r, } // TODO(jba): determine the right context for the bundle handler. ctx := context.TODO() @@ -313,7 +396,7 @@ func (c *Client) Logger(logID string, opts ...LoggerOption) *Logger { go func() { defer c.loggers.Done() <-c.donec - l.bundler.Close() + l.bundler.Flush() }() return l } @@ -331,7 +414,7 @@ func (w severityWriter) Write(p []byte) (n int, err error) { return len(p), nil } -// Close closes the client. +// Close waits for all opened loggers to be flushed and closes the client. func (c *Client) Close() error { if c.closed { return nil @@ -340,9 +423,12 @@ func (c *Client) Close() error { c.loggers.Wait() // wait for all bundlers to flush and close // Now there can be no more errors. close(c.errc) // terminate error goroutine - // Return only the first error. Since all clients share an underlying connection, - // Closes after the first always report a "connection is closing" error. - err := c.client.Close() + // Prefer logging errors to close errors. + err := c.extractErrorInfo() + err2 := c.client.Close() + if err == nil { + err = err2 + } c.closed = true return err } @@ -452,6 +538,11 @@ type Entry struct { // by the client when reading entries. It is an error to set it when // writing entries. Resource *mrpb.MonitoredResource + + // Trace is the resource name of the trace associated with the log entry, + // if any. If it contains a relative resource name, the name is assumed to + // be relative to //tracing.googleapis.com. + Trace string } // HTTPRequest contains an http.Request as well as additional @@ -472,6 +563,14 @@ type HTTPRequest struct { // including the response headers and the response body. ResponseSize int64 + // Latency is the request processing latency on the server, from the time the request was + // received until the response was sent. + Latency time.Duration + + // LocalIP is the IP address (IPv4 or IPv6) of the origin server that the request + // was sent to. + LocalIP string + // RemoteIP is the IP address (IPv4 or IPv6) of the client that issued the // HTTP request. Examples: "192.168.1.1", "FE80::0202:B3FF:FE1E:8329". RemoteIP string @@ -495,23 +594,32 @@ func fromHTTPRequest(r *HTTPRequest) *logtypepb.HttpRequest { } u := *r.Request.URL u.Fragment = "" - return &logtypepb.HttpRequest{ + pb := &logtypepb.HttpRequest{ RequestMethod: r.Request.Method, RequestUrl: u.String(), RequestSize: r.RequestSize, Status: int32(r.Status), ResponseSize: r.ResponseSize, UserAgent: r.Request.UserAgent(), + ServerIp: r.LocalIP, RemoteIp: r.RemoteIP, // TODO(jba): attempt to parse http.Request.RemoteAddr? 
Referer: r.Request.Referer(), CacheHit: r.CacheHit, CacheValidatedWithOriginServer: r.CacheValidatedWithOriginServer, } + if r.Latency != 0 { + pb.Latency = ptypes.DurationProto(r.Latency) + } + return pb } // toProtoStruct converts v, which must marshal into a JSON object, // into a Google Struct proto. func toProtoStruct(v interface{}) (*structpb.Struct, error) { + // Fast path: if v is already a *structpb.Struct, nothing to do. + if s, ok := v.(*structpb.Struct); ok { + return s, nil + } // v is a Go struct that supports JSON marshalling. We want a Struct // protobuf. Some day we may have a more direct way to get there, but right // now the only way is to marshal the Go struct to JSON, unmarshal into a @@ -539,21 +647,21 @@ func jsonMapToProtoStruct(m map[string]interface{}) *structpb.Struct { func jsonValueToStructValue(v interface{}) *structpb.Value { switch x := v.(type) { case bool: - return &structpb.Value{Kind: &structpb.Value_BoolValue{x}} + return &structpb.Value{Kind: &structpb.Value_BoolValue{BoolValue: x}} case float64: - return &structpb.Value{Kind: &structpb.Value_NumberValue{x}} + return &structpb.Value{Kind: &structpb.Value_NumberValue{NumberValue: x}} case string: - return &structpb.Value{Kind: &structpb.Value_StringValue{x}} + return &structpb.Value{Kind: &structpb.Value_StringValue{StringValue: x}} case nil: return &structpb.Value{Kind: &structpb.Value_NullValue{}} case map[string]interface{}: - return &structpb.Value{Kind: &structpb.Value_StructValue{jsonMapToProtoStruct(x)}} + return &structpb.Value{Kind: &structpb.Value_StructValue{StructValue: jsonMapToProtoStruct(x)}} case []interface{}: var vals []*structpb.Value for _, e := range x { vals = append(vals, jsonValueToStructValue(e)) } - return &structpb.Value{Kind: &structpb.Value_ListValue{&structpb.ListValue{vals}}} + return &structpb.Value{Kind: &structpb.Value_ListValue{ListValue: &structpb.ListValue{Values: vals}}} default: panic(fmt.Sprintf("bad type %T for JSON value", v)) } @@ -581,17 +689,23 @@ func (l *Logger) LogSync(ctx context.Context, e Entry) error { func (l *Logger) Log(e Entry) { ent, err := toLogEntry(e) if err != nil { - l.error(err) + l.client.error(err) return } if err := l.bundler.Add(ent, proto.Size(ent)); err != nil { - l.error(err) + l.client.error(err) } } // Flush blocks until all currently buffered log entries are sent. -func (l *Logger) Flush() { +// +// If any errors occurred since the last call to Flush from any Logger, or the +// creation of the client if this is the first call, then Flush returns a non-nil +// error with summary information about the errors. This information is unlikely to +// be actionable. For more accurate error reporting, set Client.OnError. +func (l *Logger) Flush() error { l.bundler.Flush() + return l.client.extractErrorInfo() } func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) { @@ -603,16 +717,7 @@ func (l *Logger) writeLogEntries(ctx context.Context, entries []*logpb.LogEntry) } _, err := l.client.client.WriteLogEntries(ctx, req) if err != nil { - l.error(err) - } -} - -// error puts the error on the client's error channel -// without blocking. 
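Note: Logger.Flush changes from func() to func() error in this version, and per-logger error handling moves up to the client (see the new Client.error and extractErrorInfo above). A short sketch of the caller side; the wrapper name is made up for illustration:

package example

import (
	"log"

	"cloud.google.com/go/logging"
)

// flushLogger drains buffered entries and surfaces the summary error that
// Flush now returns (previously Flush returned nothing).
func flushLogger(lg *logging.Logger) {
	if err := lg.Flush(); err != nil {
		// The summary is coarse; set Client.OnError for more precise reporting.
		log.Printf("logging: flush reported errors: %v", err)
	}
}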
-func (l *Logger) error(err error) { - select { - case l.client.errc <- err: - default: + l.client.error(err) } } @@ -649,17 +754,18 @@ func toLogEntry(e Entry) (*logpb.LogEntry, error) { HttpRequest: fromHTTPRequest(e.HTTPRequest), Operation: e.Operation, Labels: e.Labels, + Trace: e.Trace, } switch p := e.Payload.(type) { case string: - ent.Payload = &logpb.LogEntry_TextPayload{p} + ent.Payload = &logpb.LogEntry_TextPayload{TextPayload: p} default: s, err := toProtoStruct(p) if err != nil { return nil, err } - ent.Payload = &logpb.LogEntry_JsonPayload{s} + ent.Payload = &logpb.LogEntry_JsonPayload{JsonPayload: s} } return ent, nil } diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/agent_translation_client.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/agent_translation_client.go deleted file mode 100644 index 1b6f5d3f..00000000 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/agent_translation_client.go +++ /dev/null @@ -1,134 +0,0 @@ -// Copyright 2016 Google Inc. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// AUTO-GENERATED CODE. DO NOT EDIT. - -package monitoring - -import ( - "fmt" - "runtime" - - gax "github.com/googleapis/gax-go" - "golang.org/x/net/context" - "google.golang.org/api/option" - "google.golang.org/api/transport" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -var ( - agentTranslationProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") -) - -// AgentTranslationCallOptions contains the retry settings for each method of this client. -type AgentTranslationCallOptions struct { - CreateCollectdTimeSeries []gax.CallOption -} - -func defaultAgentTranslationClientOptions() []option.ClientOption { - return []option.ClientOption{ - option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithScopes(), - } -} - -func defaultAgentTranslationCallOptions() *AgentTranslationCallOptions { - retry := map[[2]string][]gax.CallOption{} - - return &AgentTranslationCallOptions{ - CreateCollectdTimeSeries: retry[[2]string{"default", "non_idempotent"}], - } -} - -// AgentTranslationClient is a client for interacting with AgentTranslationService. -type AgentTranslationClient struct { - // The connection to the service. - conn *grpc.ClientConn - - // The gRPC API client. - client monitoringpb.AgentTranslationServiceClient - - // The call options for this service. - CallOptions *AgentTranslationCallOptions - - // The metadata to be sent with each request. - metadata map[string][]string -} - -// NewAgentTranslationClient creates a new agent_translation service client. -// -// The AgentTranslation API allows `collectd`-based agents to -// write time series data to Cloud Monitoring. -// See [google.monitoring.v3.MetricService.CreateTimeSeries] instead. 
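Note: the AgentTranslation client deleted here was collectd-agent-only; as its own doc comment says, ordinary time-series writes go through the metric service instead. A sketch of the surviving path (request construction omitted; the helper name is illustrative):

package example

import (
	monitoring "cloud.google.com/go/monitoring/apiv3"
	"golang.org/x/net/context"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

// writeTimeSeries forwards a prepared request to MetricClient.CreateTimeSeries,
// the supported way to write points now that AgentTranslationClient is removed.
func writeTimeSeries(ctx context.Context, mc *monitoring.MetricClient, req *monitoringpb.CreateTimeSeriesRequest) error {
	return mc.CreateTimeSeries(ctx, req)
}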
-func NewAgentTranslationClient(ctx context.Context, opts ...option.ClientOption) (*AgentTranslationClient, error) { - conn, err := transport.DialGRPC(ctx, append(defaultAgentTranslationClientOptions(), opts...)...) - if err != nil { - return nil, err - } - c := &AgentTranslationClient{ - conn: conn, - client: monitoringpb.NewAgentTranslationServiceClient(conn), - CallOptions: defaultAgentTranslationCallOptions(), - } - c.SetGoogleClientInfo("gax", gax.Version) - return c, nil -} - -// Connection returns the client's connection to the API service. -func (c *AgentTranslationClient) Connection() *grpc.ClientConn { - return c.conn -} - -// Close closes the connection to the API service. The user should invoke this when -// the client is no longer required. -func (c *AgentTranslationClient) Close() error { - return c.conn.Close() -} - -// SetGoogleClientInfo sets the name and version of the application in -// the `x-goog-api-client` header passed on each request. Intended for -// use by Google-written clients. -func (c *AgentTranslationClient) SetGoogleClientInfo(name, version string) { - c.metadata = map[string][]string{ - "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, - } -} - -// ProjectPath returns the path for the project resource. -func AgentTranslationProjectPath(project string) string { - path, err := agentTranslationProjectPathTemplate.Render(map[string]string{ - "project": project, - }) - if err != nil { - panic(err) - } - return path -} - -// **Stackdriver Monitoring Agent only:** Creates a new time series. -// -// -func (c *AgentTranslationClient) CreateCollectdTimeSeries(ctx context.Context, req *monitoringpb.CreateCollectdTimeSeriesRequest) error { - ctx = metadata.NewContext(ctx, c.metadata) - err := gax.Invoke(ctx, func(ctx context.Context) error { - var err error - _, err = c.client.CreateCollectdTimeSeries(ctx, req) - return err - }, c.CallOptions.CreateCollectdTimeSeries...) - return err -} diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/doc.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/doc.go index 23371b67..426f6a50 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/doc.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/doc.go @@ -1,10 +1,10 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -15,10 +15,31 @@ // AUTO-GENERATED CODE. DO NOT EDIT. // Package monitoring is an experimental, auto-generated package for the -// monitoring API. +// Stackdriver Monitoring API. // // Manages your Stackdriver Monitoring data and configurations. Most projects // must be associated with a Stackdriver account, with a few exceptions as // noted on the individual method pages. 
-// package monitoring // import "cloud.google.com/go/monitoring/apiv3" + +import ( + "golang.org/x/net/context" + "google.golang.org/grpc/metadata" +) + +func insertXGoog(ctx context.Context, val []string) context.Context { + md, _ := metadata.FromOutgoingContext(ctx) + md = md.Copy() + md["x-goog-api-client"] = val + return metadata.NewOutgoingContext(ctx, md) +} + +// DefaultAuthScopes reports the default set of authentication scopes to use with this package. +func DefaultAuthScopes() []string { + return []string{ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/monitoring", + "https://www.googleapis.com/auth/monitoring.read", + "https://www.googleapis.com/auth/monitoring.write", + } +} diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go index 7698dce3..d235c85e 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/group_client.go @@ -1,10 +1,10 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -17,28 +17,22 @@ package monitoring import ( - "fmt" "math" - "runtime" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" + "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) -var ( - groupProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") - groupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}") -) - -// GroupCallOptions contains the retry settings for each method of this client. +// GroupCallOptions contains the retry settings for each method of GroupClient. type GroupCallOptions struct { ListGroups []gax.CallOption GetGroup []gax.CallOption @@ -51,7 +45,7 @@ type GroupCallOptions struct { func defaultGroupClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithScopes(), + option.WithScopes(DefaultAuthScopes()...), } } @@ -70,7 +64,6 @@ func defaultGroupCallOptions() *GroupCallOptions { }), }, } - return &GroupCallOptions{ ListGroups: retry[[2]string{"default", "idempotent"}], GetGroup: retry[[2]string{"default", "idempotent"}], @@ -81,25 +74,25 @@ func defaultGroupCallOptions() *GroupCallOptions { } } -// GroupClient is a client for interacting with GroupService. +// GroupClient is a client for interacting with Stackdriver Monitoring API. type GroupClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. - client monitoringpb.GroupServiceClient + groupClient monitoringpb.GroupServiceClient // The call options for this service. 
CallOptions *GroupCallOptions // The metadata to be sent with each request. - metadata map[string][]string + xGoogHeader []string } // NewGroupClient creates a new group service client. // // The Group API lets you inspect and manage your -// [groups](google.monitoring.v3.Group). +// groups (at google.monitoring.v3.Group). // // A group is a named filter that is used to identify // a collection of monitored resources. Groups are typically used to @@ -117,10 +110,11 @@ func NewGroupClient(ctx context.Context, opts ...option.ClientOption) (*GroupCli } c := &GroupClient{ conn: conn, - client: monitoringpb.NewGroupServiceClient(conn), CallOptions: defaultGroupCallOptions(), + + groupClient: monitoringpb.NewGroupServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.setGoogleClientInfo() return c, nil } @@ -135,91 +129,94 @@ func (c *GroupClient) Close() error { return c.conn.Close() } -// SetGoogleClientInfo sets the name and version of the application in +// setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. -func (c *GroupClient) SetGoogleClientInfo(name, version string) { - c.metadata = map[string][]string{ - "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, - } +func (c *GroupClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } -// ProjectPath returns the path for the project resource. +// GroupProjectPath returns the path for the project resource. func GroupProjectPath(project string) string { - path, err := groupProjectPathTemplate.Render(map[string]string{ - "project": project, - }) - if err != nil { - panic(err) - } - return path + return "" + + "projects/" + + project + + "" } -// GroupPath returns the path for the group resource. -func GroupGroupPath(project string, group string) string { - path, err := groupGroupPathTemplate.Render(map[string]string{ - "project": project, - "group": group, - }) - if err != nil { - panic(err) - } - return path +// GroupGroupPath returns the path for the group resource. +func GroupGroupPath(project, group string) string { + return "" + + "projects/" + + project + + "/groups/" + + group + + "" } -// ListGroups lists the existing groups. The project ID in the URL path must refer -// to a Stackdriver account. -func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest) *GroupIterator { - ctx = metadata.NewContext(ctx, c.metadata) +// ListGroups lists the existing groups. +func (c *GroupClient) ListGroups(ctx context.Context, req *monitoringpb.ListGroupsRequest, opts ...gax.CallOption) *GroupIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListGroups[0:len(c.CallOptions.ListGroups):len(c.CallOptions.ListGroups)], opts...) 
it := &GroupIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.Group, string, error) { var resp *monitoringpb.ListGroupsResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListGroups(ctx, req) + resp, err = c.groupClient.ListGroups(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListGroups...) + }, opts...) if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.Group, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.Group - return nil + it.items = append(it.items, items...) + return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } -// GetGroup gets a single group. The project ID in the URL path must refer to a -// Stackdriver account. -func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest) (*monitoringpb.Group, error) { - ctx = metadata.NewContext(ctx, c.metadata) +// GetGroup gets a single group. +func (c *GroupClient) GetGroup(ctx context.Context, req *monitoringpb.GetGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...) var resp *monitoringpb.Group - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.GetGroup(ctx, req) + resp, err = c.groupClient.GetGroup(ctx, req, settings.GRPC...) return err - }, c.CallOptions.GetGroup...) + }, opts...) if err != nil { return nil, err } return resp, nil } -// CreateGroup creates a new group. The project ID in the URL path must refer to a -// Stackdriver account. -func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest) (*monitoringpb.Group, error) { - ctx = metadata.NewContext(ctx, c.metadata) +// CreateGroup creates a new group. +func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateGroup[0:len(c.CallOptions.CreateGroup):len(c.CallOptions.CreateGroup)], opts...) var resp *monitoringpb.Group - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.CreateGroup(ctx, req) + resp, err = c.groupClient.CreateGroup(ctx, req, settings.GRPC...) return err - }, c.CallOptions.CreateGroup...) + }, opts...) if err != nil { return nil, err } @@ -227,225 +224,149 @@ func (c *GroupClient) CreateGroup(ctx context.Context, req *monitoringpb.CreateG } // UpdateGroup updates an existing group. -// You can change any group attributes except `name`. 
-// The project ID in the URL path must refer to a Stackdriver account. -func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest) (*monitoringpb.Group, error) { - ctx = metadata.NewContext(ctx, c.metadata) +// You can change any group attributes except name. +func (c *GroupClient) UpdateGroup(ctx context.Context, req *monitoringpb.UpdateGroupRequest, opts ...gax.CallOption) (*monitoringpb.Group, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...) var resp *monitoringpb.Group - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.UpdateGroup(ctx, req) + resp, err = c.groupClient.UpdateGroup(ctx, req, settings.GRPC...) return err - }, c.CallOptions.UpdateGroup...) + }, opts...) if err != nil { return nil, err } return resp, nil } -// DeleteGroup deletes an existing group. The project ID in the URL path must refer to a -// Stackdriver account. -func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest) error { - ctx = metadata.NewContext(ctx, c.metadata) - err := gax.Invoke(ctx, func(ctx context.Context) error { +// DeleteGroup deletes an existing group. +func (c *GroupClient) DeleteGroup(ctx context.Context, req *monitoringpb.DeleteGroupRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteGroup[0:len(c.CallOptions.DeleteGroup):len(c.CallOptions.DeleteGroup)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.client.DeleteGroup(ctx, req) + _, err = c.groupClient.DeleteGroup(ctx, req, settings.GRPC...) return err - }, c.CallOptions.DeleteGroup...) + }, opts...) return err } -// ListGroupMembers lists the monitored resources that are members of a group. The project ID -// in the URL path must refer to a Stackdriver account. -func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest) *MonitoredResourceIterator { - ctx = metadata.NewContext(ctx, c.metadata) +// ListGroupMembers lists the monitored resources that are members of a group. +func (c *GroupClient) ListGroupMembers(ctx context.Context, req *monitoringpb.ListGroupMembersRequest, opts ...gax.CallOption) *MonitoredResourceIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListGroupMembers[0:len(c.CallOptions.ListGroupMembers):len(c.CallOptions.ListGroupMembers)], opts...) it := &MonitoredResourceIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResource, string, error) { var resp *monitoringpb.ListGroupMembersResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListGroupMembers(ctx, req) + resp, err = c.groupClient.ListGroupMembers(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListGroupMembers...) + }, opts...) 
if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.Members, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.Members - return nil + it.items = append(it.items, items...) + return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GroupIterator manages a stream of *monitoringpb.Group. type GroupIterator struct { - // The current page data. - items []*monitoringpb.Group - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error + items []*monitoringpb.Group + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.Group, nextPageToken string, err error) } -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *GroupIterator) NextPage() ([]*monitoringpb.Group, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - if it.atLastPage { - return it.items, Done - } - return it.items, nil +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *GroupIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. func (it *GroupIterator) Next() (*monitoringpb.Group, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 - } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil -} - -// PageSize returns the page size for all subsequent calls to NextPage. 
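Note: the PageSize/SetPageSize/SetPageToken/NextPageToken accessors removed below are all folded into PageInfo(). A sketch of the equivalent controls under the new API; the request contents and saved token are assumed:

package example

import (
	monitoring "cloud.google.com/go/monitoring/apiv3"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

// listGroups resumes a paged listing: PageInfo().MaxSize replaces SetPageSize
// and PageInfo().Token replaces SetPageToken/NextPageToken.
func listGroups(ctx context.Context, gc *monitoring.GroupClient, req *monitoringpb.ListGroupsRequest, savedToken string) ([]*monitoringpb.Group, error) {
	it := gc.ListGroups(ctx, req)
	it.PageInfo().MaxSize = 100
	it.PageInfo().Token = savedToken
	var groups []*monitoringpb.Group
	for {
		g, err := it.Next()
		if err == iterator.Done {
			return groups, nil
		}
		if err != nil {
			return nil, err
		}
		groups = append(groups, g)
	}
}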
-func (it *GroupIterator) PageSize() int { - return int(it.pageSize) -} - -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *GroupIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 + var item *monitoringpb.Group + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. -func (it *GroupIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *GroupIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *GroupIterator) NextPageToken() string { - return it.nextPageToken +func (it *GroupIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } // MonitoredResourceIterator manages a stream of *monitoredrespb.MonitoredResource. type MonitoredResourceIterator struct { - // The current page data. - items []*monitoredrespb.MonitoredResource - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error + items []*monitoredrespb.MonitoredResource + pageInfo *iterator.PageInfo + nextFunc func() error + + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResource, nextPageToken string, err error) } -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *MonitoredResourceIterator) NextPage() ([]*monitoredrespb.MonitoredResource, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - if it.atLastPage { - return it.items, Done - } - return it.items, nil +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. +// Next returns the next result. 
Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. func (it *MonitoredResourceIterator) Next() (*monitoredrespb.MonitoredResource, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 - } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil -} - -// PageSize returns the page size for all subsequent calls to NextPage. -func (it *MonitoredResourceIterator) PageSize() int { - return int(it.pageSize) -} - -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *MonitoredResourceIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 + var item *monitoredrespb.MonitoredResource + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. -func (it *MonitoredResourceIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *MonitoredResourceIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *MonitoredResourceIterator) NextPageToken() string { - return it.nextPageToken +func (it *MonitoredResourceIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } diff --git a/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go b/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go index 206c444a..8b35ce72 100644 --- a/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go +++ b/src/stackdriver-nozzle/vendor/cloud.google.com/go/monitoring/apiv3/metric_client.go @@ -1,10 +1,10 @@ -// Copyright 2016 Google Inc. All Rights Reserved. +// Copyright 2017, Google Inc. All rights reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. 
// You may obtain a copy of the License at // -// http://www.apache.org/licenses/LICENSE-2.0 +// http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, @@ -17,13 +17,13 @@ package monitoring import ( - "fmt" "math" - "runtime" "time" + "cloud.google.com/go/internal/version" gax "github.com/googleapis/gax-go" "golang.org/x/net/context" + "google.golang.org/api/iterator" "google.golang.org/api/option" "google.golang.org/api/transport" metricpb "google.golang.org/genproto/googleapis/api/metric" @@ -31,16 +31,9 @@ import ( monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "google.golang.org/grpc" "google.golang.org/grpc/codes" - "google.golang.org/grpc/metadata" ) -var ( - metricProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}") - metricMetricDescriptorPathPathTemplate = gax.MustCompilePathTemplate("projects/{project}/metricDescriptors/{metric_descriptor_path=**}") - metricMonitoredResourceDescriptorPathTemplate = gax.MustCompilePathTemplate("projects/{project}/monitoredResourceDescriptors/{monitored_resource_descriptor}") -) - -// MetricCallOptions contains the retry settings for each method of this client. +// MetricCallOptions contains the retry settings for each method of MetricClient. type MetricCallOptions struct { ListMonitoredResourceDescriptors []gax.CallOption GetMonitoredResourceDescriptor []gax.CallOption @@ -55,7 +48,7 @@ type MetricCallOptions struct { func defaultMetricClientOptions() []option.ClientOption { return []option.ClientOption{ option.WithEndpoint("monitoring.googleapis.com:443"), - option.WithScopes(), + option.WithScopes(DefaultAuthScopes()...), } } @@ -74,7 +67,6 @@ func defaultMetricCallOptions() *MetricCallOptions { }), }, } - return &MetricCallOptions{ ListMonitoredResourceDescriptors: retry[[2]string{"default", "idempotent"}], GetMonitoredResourceDescriptor: retry[[2]string{"default", "idempotent"}], @@ -87,19 +79,19 @@ func defaultMetricCallOptions() *MetricCallOptions { } } -// MetricClient is a client for interacting with MetricService. +// MetricClient is a client for interacting with Stackdriver Monitoring API. type MetricClient struct { // The connection to the service. conn *grpc.ClientConn // The gRPC API client. - client monitoringpb.MetricServiceClient + metricClient monitoringpb.MetricServiceClient // The call options for this service. CallOptions *MetricCallOptions // The metadata to be sent with each request. - metadata map[string][]string + xGoogHeader []string } // NewMetricClient creates a new metric service client. @@ -113,10 +105,11 @@ func NewMetricClient(ctx context.Context, opts ...option.ClientOption) (*MetricC } c := &MetricClient{ conn: conn, - client: monitoringpb.NewMetricServiceClient(conn), CallOptions: defaultMetricCallOptions(), + + metricClient: monitoringpb.NewMetricServiceClient(conn), } - c.SetGoogleClientInfo("gax", gax.Version) + c.setGoogleClientInfo() return c, nil } @@ -131,85 +124,88 @@ func (c *MetricClient) Close() error { return c.conn.Close() } -// SetGoogleClientInfo sets the name and version of the application in +// setGoogleClientInfo sets the name and version of the application in // the `x-goog-api-client` header passed on each request. Intended for // use by Google-written clients. 
-func (c *MetricClient) SetGoogleClientInfo(name, version string) { - c.metadata = map[string][]string{ - "x-goog-api-client": {fmt.Sprintf("%s/%s %s gax/%s go/%s", name, version, gapicNameVersion, gax.Version, runtime.Version())}, - } +func (c *MetricClient) setGoogleClientInfo(keyval ...string) { + kv := append([]string{"gl-go", version.Go()}, keyval...) + kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version) + c.xGoogHeader = []string{gax.XGoogHeader(kv...)} } -// ProjectPath returns the path for the project resource. +// MetricProjectPath returns the path for the project resource. func MetricProjectPath(project string) string { - path, err := metricProjectPathTemplate.Render(map[string]string{ - "project": project, - }) - if err != nil { - panic(err) - } - return path + return "" + + "projects/" + + project + + "" } -// MetricDescriptorPathPath returns the path for the metric descriptor path resource. -func MetricMetricDescriptorPathPath(project string, metricDescriptorPath string) string { - path, err := metricMetricDescriptorPathPathTemplate.Render(map[string]string{ - "project": project, - "metric_descriptor_path": metricDescriptorPath, - }) - if err != nil { - panic(err) - } - return path +// MetricMetricDescriptorPath returns the path for the metric descriptor resource. +func MetricMetricDescriptorPath(project, metricDescriptor string) string { + return "" + + "projects/" + + project + + "/metricDescriptors/" + + metricDescriptor + + "" } -// MonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. -func MetricMonitoredResourceDescriptorPath(project string, monitoredResourceDescriptor string) string { - path, err := metricMonitoredResourceDescriptorPathTemplate.Render(map[string]string{ - "project": project, - "monitored_resource_descriptor": monitoredResourceDescriptor, - }) - if err != nil { - panic(err) - } - return path +// MetricMonitoredResourceDescriptorPath returns the path for the monitored resource descriptor resource. +func MetricMonitoredResourceDescriptorPath(project, monitoredResourceDescriptor string) string { + return "" + + "projects/" + + project + + "/monitoredResourceDescriptors/" + + monitoredResourceDescriptor + + "" } // ListMonitoredResourceDescriptors lists monitored resource descriptors that match a filter. This method does not require a Stackdriver account. -func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest) *MonitoredResourceDescriptorIterator { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricClient) ListMonitoredResourceDescriptors(ctx context.Context, req *monitoringpb.ListMonitoredResourceDescriptorsRequest, opts ...gax.CallOption) *MonitoredResourceDescriptorIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListMonitoredResourceDescriptors[0:len(c.CallOptions.ListMonitoredResourceDescriptors):len(c.CallOptions.ListMonitoredResourceDescriptors)], opts...) 
it := &MonitoredResourceDescriptorIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoredrespb.MonitoredResourceDescriptor, string, error) { var resp *monitoringpb.ListMonitoredResourceDescriptorsResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListMonitoredResourceDescriptors(ctx, req) + resp, err = c.metricClient.ListMonitoredResourceDescriptors(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListMonitoredResourceDescriptors...) + }, opts...) if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.ResourceDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.ResourceDescriptors - return nil + it.items = append(it.items, items...) + return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetMonitoredResourceDescriptor gets a single monitored resource descriptor. This method does not require a Stackdriver account. -func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest) (*monitoredrespb.MonitoredResourceDescriptor, error) { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req *monitoringpb.GetMonitoredResourceDescriptorRequest, opts ...gax.CallOption) (*monitoredrespb.MonitoredResourceDescriptor, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetMonitoredResourceDescriptor[0:len(c.CallOptions.GetMonitoredResourceDescriptor):len(c.CallOptions.GetMonitoredResourceDescriptor)], opts...) var resp *monitoredrespb.MonitoredResourceDescriptor - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.GetMonitoredResourceDescriptor(ctx, req) + resp, err = c.metricClient.GetMonitoredResourceDescriptor(ctx, req, settings.GRPC...) return err - }, c.CallOptions.GetMonitoredResourceDescriptor...) + }, opts...) if err != nil { return nil, err } @@ -217,40 +213,50 @@ func (c *MetricClient) GetMonitoredResourceDescriptor(ctx context.Context, req * } // ListMetricDescriptors lists metric descriptors that match a filter. This method does not require a Stackdriver account. -func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest) *MetricDescriptorIterator { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricClient) ListMetricDescriptors(ctx context.Context, req *monitoringpb.ListMetricDescriptorsRequest, opts ...gax.CallOption) *MetricDescriptorIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListMetricDescriptors[0:len(c.CallOptions.ListMetricDescriptors):len(c.CallOptions.ListMetricDescriptors)], opts...) 
it := &MetricDescriptorIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*metricpb.MetricDescriptor, string, error) { var resp *monitoringpb.ListMetricDescriptorsResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListMetricDescriptors(ctx, req) + resp, err = c.metricClient.ListMetricDescriptors(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListMetricDescriptors...) + }, opts...) if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.MetricDescriptors, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.MetricDescriptors - return nil + it.items = append(it.items, items...) + return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } // GetMetricDescriptor gets a single metric descriptor. This method does not require a Stackdriver account. -func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringpb.GetMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.GetMetricDescriptor[0:len(c.CallOptions.GetMetricDescriptor):len(c.CallOptions.GetMetricDescriptor)], opts...) var resp *metricpb.MetricDescriptor - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.GetMetricDescriptor(ctx, req) + resp, err = c.metricClient.GetMetricDescriptor(ctx, req, settings.GRPC...) return err - }, c.CallOptions.GetMetricDescriptor...) + }, opts...) if err != nil { return nil, err } @@ -259,15 +265,16 @@ func (c *MetricClient) GetMetricDescriptor(ctx context.Context, req *monitoringp // CreateMetricDescriptor creates a new metric descriptor. // User-created metric descriptors define -// [custom metrics](/monitoring/custom-metrics). -func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest) (*metricpb.MetricDescriptor, error) { - ctx = metadata.NewContext(ctx, c.metadata) +// custom metrics (at /monitoring/custom-metrics). +func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitoringpb.CreateMetricDescriptorRequest, opts ...gax.CallOption) (*metricpb.MetricDescriptor, error) { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateMetricDescriptor[0:len(c.CallOptions.CreateMetricDescriptor):len(c.CallOptions.CreateMetricDescriptor)], opts...) 
var resp *metricpb.MetricDescriptor - err := gax.Invoke(ctx, func(ctx context.Context) error { + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - resp, err = c.client.CreateMetricDescriptor(ctx, req) + resp, err = c.metricClient.CreateMetricDescriptor(ctx, req, settings.GRPC...) return err - }, c.CallOptions.CreateMetricDescriptor...) + }, opts...) if err != nil { return nil, err } @@ -275,40 +282,50 @@ func (c *MetricClient) CreateMetricDescriptor(ctx context.Context, req *monitori } // DeleteMetricDescriptor deletes a metric descriptor. Only user-created -// [custom metrics](/monitoring/custom-metrics) can be deleted. -func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest) error { - ctx = metadata.NewContext(ctx, c.metadata) - err := gax.Invoke(ctx, func(ctx context.Context) error { +// custom metrics (at /monitoring/custom-metrics) can be deleted. +func (c *MetricClient) DeleteMetricDescriptor(ctx context.Context, req *monitoringpb.DeleteMetricDescriptorRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.DeleteMetricDescriptor[0:len(c.CallOptions.DeleteMetricDescriptor):len(c.CallOptions.DeleteMetricDescriptor)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.client.DeleteMetricDescriptor(ctx, req) + _, err = c.metricClient.DeleteMetricDescriptor(ctx, req, settings.GRPC...) return err - }, c.CallOptions.DeleteMetricDescriptor...) + }, opts...) return err } // ListTimeSeries lists time series that match a filter. This method does not require a Stackdriver account. -func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest) *TimeSeriesIterator { - ctx = metadata.NewContext(ctx, c.metadata) +func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.ListTimeSeriesRequest, opts ...gax.CallOption) *TimeSeriesIterator { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.ListTimeSeries[0:len(c.CallOptions.ListTimeSeries):len(c.CallOptions.ListTimeSeries)], opts...) it := &TimeSeriesIterator{} - it.apiCall = func() error { + it.InternalFetch = func(pageSize int, pageToken string) ([]*monitoringpb.TimeSeries, string, error) { var resp *monitoringpb.ListTimeSeriesResponse - err := gax.Invoke(ctx, func(ctx context.Context) error { + req.PageToken = pageToken + if pageSize > math.MaxInt32 { + req.PageSize = math.MaxInt32 + } else { + req.PageSize = int32(pageSize) + } + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - req.PageToken = it.nextPageToken - req.PageSize = it.pageSize - resp, err = c.client.ListTimeSeries(ctx, req) + resp, err = c.metricClient.ListTimeSeries(ctx, req, settings.GRPC...) return err - }, c.CallOptions.ListTimeSeries...) + }, opts...) if err != nil { - return err + return nil, "", err } - if resp.NextPageToken == "" { - it.atLastPage = true + return resp.TimeSeries, resp.NextPageToken, nil + } + fetch := func(pageSize int, pageToken string) (string, error) { + items, nextPageToken, err := it.InternalFetch(pageSize, pageToken) + if err != nil { + return "", err } - it.nextPageToken = resp.NextPageToken - it.items = resp.TimeSeries - return nil + it.items = append(it.items, items...) 
+ return nextPageToken, nil } + it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf) return it } @@ -316,264 +333,139 @@ func (c *MetricClient) ListTimeSeries(ctx context.Context, req *monitoringpb.Lis // The response is empty if all time series in the request were written. // If any time series could not be written, a corresponding failure message is // included in the error response. -func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest) error { - ctx = metadata.NewContext(ctx, c.metadata) - err := gax.Invoke(ctx, func(ctx context.Context) error { +func (c *MetricClient) CreateTimeSeries(ctx context.Context, req *monitoringpb.CreateTimeSeriesRequest, opts ...gax.CallOption) error { + ctx = insertXGoog(ctx, c.xGoogHeader) + opts = append(c.CallOptions.CreateTimeSeries[0:len(c.CallOptions.CreateTimeSeries):len(c.CallOptions.CreateTimeSeries)], opts...) + err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error { var err error - _, err = c.client.CreateTimeSeries(ctx, req) + _, err = c.metricClient.CreateTimeSeries(ctx, req, settings.GRPC...) return err - }, c.CallOptions.CreateTimeSeries...) + }, opts...) return err } -// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. -type MonitoredResourceDescriptorIterator struct { - // The current page data. - items []*monitoredrespb.MonitoredResourceDescriptor - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error -} - -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *MonitoredResourceDescriptorIterator) NextPage() ([]*monitoredrespb.MonitoredResourceDescriptor, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - if it.atLastPage { - return it.items, Done - } - return it.items, nil -} +// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. +type MetricDescriptorIterator struct { + items []*metricpb.MetricDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. 
-func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 - } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*metricpb.MetricDescriptor, nextPageToken string, err error) } -// PageSize returns the page size for all subsequent calls to NextPage. -func (it *MonitoredResourceDescriptorIterator) PageSize() int { - return int(it.pageSize) +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MetricDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *MonitoredResourceDescriptorIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { + var item *metricpb.MetricDescriptor + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. -func (it *MonitoredResourceDescriptorIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *MetricDescriptorIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *MonitoredResourceDescriptorIterator) NextPageToken() string { - return it.nextPageToken +func (it *MetricDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } -// MetricDescriptorIterator manages a stream of *metricpb.MetricDescriptor. -type MetricDescriptorIterator struct { - // The current page data. - items []*metricpb.MetricDescriptor - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error -} - -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *MetricDescriptorIterator) NextPage() ([]*metricpb.MetricDescriptor, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. 
- return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - if it.atLastPage { - return it.items, Done - } - return it.items, nil -} +// MonitoredResourceDescriptorIterator manages a stream of *monitoredrespb.MonitoredResourceDescriptor. +type MonitoredResourceDescriptorIterator struct { + items []*monitoredrespb.MonitoredResourceDescriptor + pageInfo *iterator.PageInfo + nextFunc func() error -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. -func (it *MetricDescriptorIterator) Next() (*metricpb.MetricDescriptor, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 - } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoredrespb.MonitoredResourceDescriptor, nextPageToken string, err error) } -// PageSize returns the page size for all subsequent calls to NextPage. -func (it *MetricDescriptorIterator) PageSize() int { - return int(it.pageSize) +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *MonitoredResourceDescriptorIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *MetricDescriptorIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *MonitoredResourceDescriptorIterator) Next() (*monitoredrespb.MonitoredResourceDescriptor, error) { + var item *monitoredrespb.MonitoredResourceDescriptor + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. -func (it *MetricDescriptorIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *MonitoredResourceDescriptorIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *MetricDescriptorIterator) NextPageToken() string { - return it.nextPageToken +func (it *MonitoredResourceDescriptorIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } // TimeSeriesIterator manages a stream of *monitoringpb.TimeSeries. type TimeSeriesIterator struct { - // The current page data. 
- items []*monitoringpb.TimeSeries - atLastPage bool - currentIndex int - pageSize int32 - nextPageToken string - apiCall func() error -} - -// NextPage returns the next page of results. -// It will return at most the number of results specified by the last call to SetPageSize. -// If SetPageSize was never called or was called with a value less than 1, -// the page size is determined by the underlying service. -// -// NextPage may return a second return value of Done along with the last page of results. After -// NextPage returns Done, all subsequent calls to NextPage will return (nil, Done). -// -// Next and NextPage should not be used with the same iterator. -func (it *TimeSeriesIterator) NextPage() ([]*monitoringpb.TimeSeries, error) { - if it.atLastPage { - // We already returned Done with the last page of items. Continue to - // return Done, but with no items. - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - if it.atLastPage { - return it.items, Done - } - return it.items, nil -} + items []*monitoringpb.TimeSeries + pageInfo *iterator.PageInfo + nextFunc func() error -// Next returns the next result. Its second return value is Done if there are no more results. -// Once next returns Done, all subsequent calls will return Done. -// -// Internally, Next retrieves results in bulk. You can call SetPageSize as a performance hint to -// affect how many results are retrieved in a single RPC. -// -// SetPageToken should not be called when using Next. -// -// Next and NextPage should not be used with the same iterator. -func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { - for it.currentIndex >= len(it.items) { - if it.atLastPage { - return nil, Done - } - if err := it.apiCall(); err != nil { - return nil, err - } - it.currentIndex = 0 - } - result := it.items[it.currentIndex] - it.currentIndex++ - return result, nil + // InternalFetch is for use by the Google Cloud Libraries only. + // It is not part of the stable interface of this package. + // + // InternalFetch returns results from a single call to the underlying RPC. + // The number of results is no greater than pageSize. + // If there are no more results, nextPageToken is empty and err is nil. + InternalFetch func(pageSize int, pageToken string) (results []*monitoringpb.TimeSeries, nextPageToken string, err error) } -// PageSize returns the page size for all subsequent calls to NextPage. -func (it *TimeSeriesIterator) PageSize() int { - return int(it.pageSize) +// PageInfo supports pagination. See the google.golang.org/api/iterator package for details. +func (it *TimeSeriesIterator) PageInfo() *iterator.PageInfo { + return it.pageInfo } -// SetPageSize sets the page size for all subsequent calls to NextPage. -func (it *TimeSeriesIterator) SetPageSize(pageSize int) { - if pageSize > math.MaxInt32 { - pageSize = math.MaxInt32 +// Next returns the next result. Its second return value is iterator.Done if there are no more +// results. Once Next returns Done, all subsequent calls will return Done. +func (it *TimeSeriesIterator) Next() (*monitoringpb.TimeSeries, error) { + var item *monitoringpb.TimeSeries + if err := it.nextFunc(); err != nil { + return item, err } - it.pageSize = int32(pageSize) + item = it.items[0] + it.items = it.items[1:] + return item, nil } -// SetPageToken sets the page token for the next call to NextPage, to resume the iteration from -// a previous point. 
-func (it *TimeSeriesIterator) SetPageToken(token string) { - it.nextPageToken = token +func (it *TimeSeriesIterator) bufLen() int { + return len(it.items) } -// NextPageToken returns a page token that can be used with SetPageToken to resume -// iteration from the next page. It returns the empty string if there are no more pages. -func (it *TimeSeriesIterator) NextPageToken() string { - return it.nextPageToken +func (it *TimeSeriesIterator) takeBuf() interface{} { + b := it.items + it.items = nil + return b } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/LICENSE b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/LICENSE index e453acda..cb2ec6c5 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/LICENSE +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/LICENSE @@ -1,6 +1,6 @@ The MIT License -Copyright (c) 2015 Long Nguyen +Copyright (c) 2017 Long Nguyen Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/README.md b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/README.md index 47d53d8a..25b896b5 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/README.md +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/README.md @@ -9,7 +9,13 @@ ### Usage -`go get github.com/cloudfoundry-community/go-cfclient` +``` +go get github.com/cloudfoundry-community/go-cfclient +``` + +NOTE: Currently this project is not versioning its releases and so breaking changes might be introduced. Whilst hopefully notifications of breaking changes are made via commit messages, ideally your project will use a local vendoring system to lock in a version of `go-cfclient` that is known to work for you. This will allow you to control the timing and maintenance of upgrades to newer versions of this library. + +Some example code: ```go package main @@ -19,24 +25,23 @@ import ( ) func main() { - c := &Config{ + c := &cfclient.Config{ ApiAddress: "https://api.10.244.0.34.xip.io", - LoginAddress: "https://login.10.244.0.34.xip.io", Username: "admin", Password: "admin", } - client := NewClient(c) - apps := client.ListApps() + client, _ := cfclient.NewClient(c) + apps, _ := client.ListApps() fmt.Println(apps) } ``` ### Developing & Contributing -You can use Godep to restor the dependency +You can use Godep to restore the dependency Tested with go1.5.3 ```bash godep go build ``` -Pull requests welcomed. Please ensure you make your changes in a branch off of the `develop` branch, not the `master` branch. +Pull requests welcome. 
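The vendored `cloud.google.com/go/monitoring/apiv3` changes earlier in this patch drop the old `NextPage`/`SetPageSize`/`SetPageToken` iterator surface in favor of the standard `google.golang.org/api/iterator` pattern: callers loop on `Next()` until it returns `iterator.Done`, and use `PageInfo()` for paging hints. The following is a minimal sketch of how a caller might consume the new `TimeSeriesIterator`; the helper name, page-size hint, and printed field are illustrative only and not taken from this repository.

```go
package example

import (
	"fmt"

	monitoring "cloud.google.com/go/monitoring/apiv3"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3"
)

// printTimeSeries is a hypothetical helper: it walks every TimeSeries matched
// by req and stops when the iterator reports iterator.Done.
func printTimeSeries(ctx context.Context, c *monitoring.MetricClient, req *monitoringpb.ListTimeSeriesRequest) error {
	it := c.ListTimeSeries(ctx, req)
	it.PageInfo().MaxSize = 200 // optional hint for the per-RPC page size
	for {
		ts, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(ts.GetMetric().GetType())
	}
	return nil
}
```

Compared with the removed `NextPage` API, pagination state now lives inside `iterator.PageInfo`, so the same loop works whether the results span one RPC page or many.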
diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/appevents.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/appevents.go new file mode 100644 index 00000000..8784c62e --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/appevents.go @@ -0,0 +1,182 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "time" + + "github.com/pkg/errors" +) + +const ( + //AppCrash app.crash event const + AppCrash = "app.crash" + //AppStart audit.app.start event const + AppStart = "audit.app.start" + //AppStop audit.app.stop event const + AppStop = "audit.app.stop" + //AppUpdate audit.app.update event const + AppUpdate = "audit.app.update" + //AppCreate audit.app.create event const + AppCreate = "audit.app.create" + //AppDelete audit.app.delete-request event const + AppDelete = "audit.app.delete-request" + //AppSSHAuth audit.app.ssh-authorized event const + AppSSHAuth = "audit.app.ssh-authorized" + //AppSSHUnauth audit.app.ssh-unauthorized event const + AppSSHUnauth = "audit.app.ssh-unauthorized" + //AppRestage audit.app.restage event const + AppRestage = "audit.app.restage" + //AppMapRoute audit.app.map-route event const + AppMapRoute = "audit.app.map-route" + //AppUnmapRoute audit.app.unmap-route event const + AppUnmapRoute = "audit.app.unmap-route" + //FilterTimestamp const for query filter timestamp + FilterTimestamp = "timestamp" + //FilterActee const for query filter actee + FilterActee = "actee" +) + +//ValidOperators const for all valid operators in a query +var ValidOperators = []string{":", ">=", "<=", "<", ">", "IN"} + +// AppEventResponse the entire response +type AppEventResponse struct { + Results int `json:"total_results"` + Pages int `json:"total_pages"` + PrevURL string `json:"prev_url"` + NextURL string `json:"next_url"` + Resources []AppEventResource `json:"resources"` +} + +// AppEventResource the event resources +type AppEventResource struct { + Meta Meta `json:"metadata"` + Entity AppEventEntity `json:"entity"` +} + +//AppEventQuery a struct for defining queries like 'q=filter>value' or 'q=filter IN a,b,c' +type AppEventQuery struct { + Filter string + Operator string + Value string +} + +// The AppEventEntity the actual app event body +type AppEventEntity struct { + //EventTypes are app.crash, audit.app.start, audit.app.stop, audit.app.update, audit.app.create, audit.app.delete-request + EventType string `json:"type"` + //The GUID of the actor. + Actor string `json:"actor"` + //The actor type, user or app + ActorType string `json:"actor_type"` + //The name of the actor. + ActorName string `json:"actor_name"` + //The GUID of the actee. + Actee string `json:"actee"` + //The actee type, space, app or v3-app + ActeeType string `json:"actee_type"` + //The name of the actee. + ActeeName string `json:"actee_name"` + //Timestamp format "2016-02-26T13:29:44Z". The event creation time. 
+ Timestamp time.Time `json:"timestamp"` + MetaData struct { + //app.crash event fields + ExitDescription string `json:"exit_description,omitempty"` + ExitReason string `json:"reason,omitempty"` + ExitStatus string `json:"exit_status,omitempty"` + + Request struct { + Name string `json:"name,omitempty"` + Instances float64 `json:"instances,omitempty"` + State string `json:"state,omitempty"` + Memory float64 `json:"memory,omitempty"` + EnvironmentVars string `json:"environment_json,omitempty"` + DockerCredentials string `json:"docker_credentials_json,omitempty"` + //audit.app.create event fields + Console bool `json:"console,omitempty"` + Buildpack string `json:"buildpack,omitempty"` + Space string `json:"space_guid,omitempty"` + HealthcheckType string `json:"health_check_type,omitempty"` + HealthcheckTimeout float64 `json:"health_check_timeout,omitempty"` + Production bool `json:"production,omitempty"` + //app.crash event fields + Index float64 `json:"index,omitempty"` + } `json:"request"` + } `json:"metadata"` +} + +// ListAppEvents returns all app events based on eventType +func (c *Client) ListAppEvents(eventType string) ([]AppEventEntity, error) { + return c.ListAppEventsByQuery(eventType, nil) +} + +// ListAppEventsByQuery returns all app events based on eventType and queries +func (c *Client) ListAppEventsByQuery(eventType string, queries []AppEventQuery) ([]AppEventEntity, error) { + var events []AppEventEntity + + if eventType != AppCrash && eventType != AppStart && eventType != AppStop && eventType != AppUpdate && eventType != AppCreate && + eventType != AppDelete && eventType != AppSSHAuth && eventType != AppSSHUnauth && eventType != AppRestage && + eventType != AppMapRoute && eventType != AppUnmapRoute { + return nil, errors.New("Unsupported app event type " + eventType) + } + + var query = "/v2/events?q=type:" + eventType + //adding the additional queries + if queries != nil && len(queries) > 0 { + for _, eventQuery := range queries { + if eventQuery.Filter != FilterTimestamp && eventQuery.Filter != FilterActee { + return nil, errors.New("Unsupported query filter type " + eventQuery.Filter) + } + if !stringInSlice(eventQuery.Operator, ValidOperators) { + return nil, errors.New("Unsupported query operator type " + eventQuery.Operator) + } + query += "&q=" + eventQuery.Filter + eventQuery.Operator + eventQuery.Value + } + } + + for { + eventResponse, err := c.getAppEventsResponse(query) + if err != nil { + return []AppEventEntity{}, err + } + for _, event := range eventResponse.Resources { + events = append(events, event.Entity) + } + query = eventResponse.NextURL + if query == "" { + break + } + } + + return events, nil +} + +func (c *Client) getAppEventsResponse(query string) (AppEventResponse, error) { + var eventResponse AppEventResponse + r := c.NewRequest("GET", query) + resp, err := c.DoRequest(r) + if err != nil { + return AppEventResponse{}, errors.Wrap(err, "Error requesting appevents") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return AppEventResponse{}, errors.Wrap(err, "Error reading appevents response body") + } + + err = json.Unmarshal(resBody, &eventResponse) + if err != nil { + return AppEventResponse{}, errors.Wrap(err, "Error unmarshalling appevent") + } + return eventResponse, nil +} + +func stringInSlice(str string, list []string) bool { + for _, v := range list { + if v == str { + return true + } + } + return false +} diff --git 
a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/apps.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/apps.go index 88a73e36..8b10b24a 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/apps.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/apps.go @@ -4,7 +4,11 @@ import ( "encoding/json" "fmt" "io/ioutil" - "log" + "net/url" + "strconv" + "time" + + "github.com/pkg/errors" ) type AppResponse struct { @@ -20,63 +24,217 @@ type AppResource struct { } type App struct { - Guid string `json:"guid"` - Name string `json:"name"` - Environment map[string]interface{} `json:"environment_json"` - SpaceURL string `json:"space_url"` - SpaceData SpaceResource `json:"space"` - c *Client + Guid string `json:"guid"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Name string `json:"name"` + Memory int `json:"memory"` + Instances int `json:"instances"` + DiskQuota int `json:"disk_quota"` + SpaceGuid string `json:"space_guid"` + StackGuid string `json:"stack_guid"` + State string `json:"state"` + PackageState string `json:"package_state"` + Command string `json:"command"` + Buildpack string `json:"buildpack"` + DetectedBuildpack string `json:"detected_buildpack"` + DetectedBuildpackGuid string `json:"detected_buildpack_guid"` + HealthCheckHttpEndpoint string `json:"health_check_http_endpoint"` + HealthCheckType string `json:"health_check_type"` + HealthCheckTimeout int `json:"health_check_timeout"` + Diego bool `json:"diego"` + EnableSSH bool `json:"enable_ssh"` + DetectedStartCommand string `json:"detected_start_command"` + DockerImage string `json:"docker_image"` + DockerCredentials map[string]interface{} `json:"docker_credentials_json"` + Environment map[string]interface{} `json:"environment_json"` + StagingFailedReason string `json:"staging_failed_reason"` + StagingFailedDescription string `json:"staging_failed_description"` + Ports []int `json:"ports"` + SpaceURL string `json:"space_url"` + SpaceData SpaceResource `json:"space"` + PackageUpdatedAt string `json:"package_updated_at"` + c *Client } type AppInstance struct { + State string `json:"state"` + Since sinceTime `json:"since"` +} + +type AppStats struct { State string `json:"state"` + Stats struct { + Name string `json:"name"` + Uris []string `json:"uris"` + Host string `json:"host"` + Port int `json:"port"` + Uptime int `json:"uptime"` + MemQuota int `json:"mem_quota"` + DiskQuota int `json:"disk_quota"` + FdsQuota int `json:"fds_quota"` + Usage struct { + Time statTime `json:"time"` + CPU float64 `json:"cpu"` + Mem int `json:"mem"` + Disk int `json:"disk"` + } `json:"usage"` + } `json:"stats"` +} + +type AppSummary struct { + Guid string `json:"guid"` + Name string `json:"name"` + ServiceCount int `json:"service_count"` + RunningInstances int `json:"running_instances"` + SpaceGuid string `json:"space_guid"` + StackGuid string `json:"stack_guid"` + Buildpack string `json:"buildpack"` + DetectedBuildpack string `json:"detected_buildpack"` + Environment map[string]interface{} `json:"environment_json"` + Memory int `json:"memory"` + Instances int `json:"instances"` + DiskQuota int `json:"disk_quota"` + State string `json:"state"` + Command string `json:"command"` + PackageState string `json:"package_state"` + HealthCheckType string `json:"health_check_type"` + HealthCheckTimeout int `json:"health_check_timeout"` + StagingFailedReason string 
`json:"staging_failed_reason"` + StagingFailedDescription string `json:"staging_failed_description"` + Diego bool `json:"diego"` + DockerImage string `json:"docker_image"` + DetectedStartCommand string `json:"detected_start_command"` + EnableSSH bool `json:"enable_ssh"` + DockerCredentials map[string]interface{} `json:"docker_credentials_json"` +} + +type AppEnv struct { + // These can have arbitrary JSON so need to map to interface{} + Environment map[string]interface{} `json:"environment_json"` + StagingEnv map[string]interface{} `json:"staging_env_json"` + RunningEnv map[string]interface{} `json:"running_env_json"` + SystemEnv map[string]interface{} `json:"system_env_json"` + ApplicationEnv map[string]interface{} `json:"application_env_json"` +} + +// Custom time types to handle non-RFC3339 formatting in API JSON + +type sinceTime struct { + time.Time } -func (a *App) Space() Space { +func (s *sinceTime) UnmarshalJSON(b []byte) (err error) { + timeFlt, err := strconv.ParseFloat(string(b), 64) + if err != nil { + return err + } + time := time.Unix(int64(timeFlt), 0) + *s = sinceTime{time} + return nil +} + +func (s sinceTime) ToTime() time.Time { + t, _ := time.Parse(time.UnixDate, s.Format(time.UnixDate)) + return t +} + +type statTime struct { + time.Time +} + +func (s *statTime) UnmarshalJSON(b []byte) (err error) { + timeString, err := strconv.Unquote(string(b)) + if err != nil { + return err + } + + possibleFormats := [...]string{time.RFC3339, time.RFC3339Nano, "2006-01-02 15:04:05 -0700", "2006-01-02 15:04:05 MST"} + + var value time.Time + for _, possibleFormat := range possibleFormats { + if value, err = time.Parse(possibleFormat, timeString); err == nil { + *s = statTime{value} + return nil + } + } + + return fmt.Errorf("%s was not in any of the expected Date Formats %v", timeString, possibleFormats) +} + +func (s statTime) ToTime() time.Time { + t, _ := time.Parse(time.UnixDate, s.Format(time.UnixDate)) + return t +} + +func (a *App) Space() (Space, error) { var spaceResource SpaceResource - r := a.c.newRequest("GET", a.SpaceURL) - resp, err := a.c.doRequest(r) + r := a.c.NewRequest("GET", a.SpaceURL) + resp, err := a.c.DoRequest(r) if err != nil { - log.Printf("Error requesting space %v", err) + return Space{}, errors.Wrap(err, "Error requesting space") } + defer resp.Body.Close() resBody, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("Error reading space request %v", resBody) + return Space{}, errors.Wrap(err, "Error reading space response") } err = json.Unmarshal(resBody, &spaceResource) if err != nil { - log.Printf("Error unmarshaling space %v", err) + return Space{}, errors.Wrap(err, "Error unmarshalling body") } spaceResource.Entity.Guid = spaceResource.Meta.Guid spaceResource.Entity.c = a.c - return spaceResource.Entity + return spaceResource.Entity, nil } -func (c *Client) ListApps() []App { - var apps []App +// ListAppsByQueryWithLimits queries totalPages app info. 
When totalPages is +// less and equal than 0, it queries all app info +// When there are no more than totalPages apps on server side, all apps info will be returned +func (c *Client) ListAppsByQueryWithLimits(query url.Values, totalPages int) ([]App, error) { + return c.listApps("/v2/apps?"+query.Encode(), totalPages) +} - requestUrl := "/v2/apps?inline-relations-depth=2" +func (c *Client) ListAppsByQuery(query url.Values) ([]App, error) { + return c.listApps("/v2/apps?"+query.Encode(), -1) +} + +func (c *Client) ListApps() ([]App, error) { + q := url.Values{} + q.Set("inline-relations-depth", "2") + return c.ListAppsByQuery(q) +} + +func (c *Client) ListAppsByRoute(routeGuid string) ([]App, error) { + return c.listApps(fmt.Sprintf("/v2/routes/%s/apps", routeGuid), -1) +} + +func (c *Client) listApps(requestUrl string, totalPages int) ([]App, error) { + pages := 0 + apps := []App{} for { var appResp AppResponse - r := c.newRequest("GET", requestUrl) - resp, err := c.doRequest(r) - + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) if err != nil { - log.Printf("Error requesting apps %v", err) + return nil, errors.Wrap(err, "Error requesting apps") } + defer resp.Body.Close() resBody, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("Error reading app request %v", resBody) + return nil, errors.Wrap(err, "Error reading app request") } err = json.Unmarshal(resBody, &appResp) if err != nil { - log.Printf("Error unmarshaling app %v", err) + return nil, errors.Wrap(err, "Error unmarshalling app") } for _, app := range appResp.Resources { app.Entity.Guid = app.Meta.Guid + app.Entity.CreatedAt = app.Meta.CreatedAt + app.Entity.UpdatedAt = app.Meta.UpdatedAt app.Entity.SpaceData.Entity.Guid = app.Entity.SpaceData.Meta.Guid app.Entity.SpaceData.Entity.OrgData.Entity.Guid = app.Entity.SpaceData.Entity.OrgData.Meta.Guid app.Entity.c = c @@ -87,65 +245,140 @@ func (c *Client) ListApps() []App { if requestUrl == "" { break } + + pages += 1 + if totalPages > 0 && pages >= totalPages { + break + } } - return apps + return apps, nil } -func (c *Client) GetAppInstances(guid string) map[string]AppInstance { +func (c *Client) GetAppInstances(guid string) (map[string]AppInstance, error) { var appInstances map[string]AppInstance requestURL := fmt.Sprintf("/v2/apps/%s/instances", guid) - r := c.newRequest("GET", requestURL) - resp, err := c.doRequest(r) - defer resp.Body.Close() + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) if err != nil { - log.Printf("Error requesting app instances %v", err) + return nil, errors.Wrap(err, "Error requesting app instances") } + defer resp.Body.Close() resBody, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("Error reading app instances %v", err) + return nil, errors.Wrap(err, "Error reading app instances") } err = json.Unmarshal(resBody, &appInstances) if err != nil { - log.Printf("Error unmarshalling app instances %v", err) + return nil, errors.Wrap(err, "Error unmarshalling app instances") + } + return appInstances, nil +} + +func (c *Client) GetAppEnv(guid string) (AppEnv, error) { + var appEnv AppEnv + + requestURL := fmt.Sprintf("/v2/apps/%s/env", guid) + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return appEnv, errors.Wrap(err, "Error requesting app env") } - return appInstances + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return appEnv, errors.Wrap(err, "Error reading app env") + } + err = json.Unmarshal(resBody, 
&appEnv) + if err != nil { + return appEnv, errors.Wrap(err, "Error unmarshalling app env") + } + return appEnv, nil +} + +func (c *Client) GetAppRoutes(guid string) ([]Route, error) { + return c.fetchRoutes(fmt.Sprintf("/v2/apps/%s/routes", guid)) +} + +func (c *Client) GetAppStats(guid string) (map[string]AppStats, error) { + var appStats map[string]AppStats + + requestURL := fmt.Sprintf("/v2/apps/%s/stats", guid) + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting app stats") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading app stats") + } + err = json.Unmarshal(resBody, &appStats) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling app stats") + } + return appStats, nil } func (c *Client) KillAppInstance(guid string, index string) error { requestURL := fmt.Sprintf("/v2/apps/%s/instances/%s", guid, index) - r := c.newRequest("DELETE", requestURL) - resp, err := c.doRequest(r) - defer resp.Body.Close() + r := c.NewRequest("DELETE", requestURL) + resp, err := c.DoRequest(r) if err != nil { - log.Printf("Error killing app instance %v", err) - return fmt.Errorf("Error stopping app %s at index %s", guid, index) + return errors.Wrapf(err, "Error stopping app %s at index %s", guid, index) } + defer resp.Body.Close() if resp.StatusCode != 204 { - return fmt.Errorf("Error stopping app %s at index %s", guid, index) + return errors.Wrapf(err, "Error stopping app %s at index %s", guid, index) } return nil } -func (c *Client) AppByGuid(guid string) App { +func (c *Client) GetAppByGuid(guid string) (App, error) { var appResource AppResource - r := c.newRequest("GET", "/v2/apps/"+guid+"?inline-relations-depth=2") - resp, err := c.doRequest(r) + r := c.NewRequest("GET", "/v2/apps/"+guid+"?inline-relations-depth=2") + resp, err := c.DoRequest(r) if err != nil { - log.Printf("Error requesting apps %v", err) + return App{}, errors.Wrap(err, "Error requesting apps") } + defer resp.Body.Close() resBody, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("Error reading app request %v", resBody) + return App{}, errors.Wrap(err, "Error reading app response body") } err = json.Unmarshal(resBody, &appResource) if err != nil { - log.Printf("Error unmarshaling app %v", err) + return App{}, errors.Wrap(err, "Error unmarshalling app") } appResource.Entity.Guid = appResource.Meta.Guid appResource.Entity.SpaceData.Entity.Guid = appResource.Entity.SpaceData.Meta.Guid appResource.Entity.SpaceData.Entity.OrgData.Entity.Guid = appResource.Entity.SpaceData.Entity.OrgData.Meta.Guid appResource.Entity.c = c - return appResource.Entity + return appResource.Entity, nil +} + +func (c *Client) AppByGuid(guid string) (App, error) { + return c.GetAppByGuid(guid) +} + +//AppByName takes an appName, and GUIDs for a space and org, and performs +// the API lookup with those query parameters set to return you the desired +// App object. 
+func (c *Client) AppByName(appName, spaceGuid, orgGuid string) (app App, err error) { + query := url.Values{} + query.Add("q", fmt.Sprintf("organization_guid:%s", orgGuid)) + query.Add("q", fmt.Sprintf("space_guid:%s", spaceGuid)) + query.Add("q", fmt.Sprintf("name:%s", appName)) + apps, err := c.ListAppsByQuery(query) + if err != nil { + return + } + if len(apps) == 0 { + err = fmt.Errorf("No app found with name: `%s` in space with GUID `%s` and org with GUID `%s`", appName, spaceGuid, orgGuid) + return + } + app = apps[0] + return } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/buildpacks.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/buildpacks.go new file mode 100644 index 00000000..4da2f888 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/buildpacks.go @@ -0,0 +1,69 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + + "github.com/pkg/errors" +) + +type BuildpackResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []BuildpackResource `json:"resources"` +} + +type BuildpackResource struct { + Meta Meta `json:"metadata"` + Entity Buildpack `json:"entity"` +} + +type Buildpack struct { + Guid string `json:"guid"` + Name string `json:"name"` + Enabled bool `json:"enabled"` + Locked bool `json:"locked"` + Filename string `json:"filename"` + c *Client +} + +func (c *Client) ListBuildpacks() ([]Buildpack, error) { + var buildpacks []Buildpack + requestUrl := "/v2/buildpacks" + for { + buildpackResp, err := c.getBuildpackResponse(requestUrl) + if err != nil { + return []Buildpack{}, err + } + for _, buildpack := range buildpackResp.Resources { + buildpack.Entity.Guid = buildpack.Meta.Guid + buildpack.Entity.c = c + buildpacks = append(buildpacks, buildpack.Entity) + } + requestUrl = buildpackResp.NextUrl + if requestUrl == "" { + break + } + } + return buildpacks, nil +} + +func (c *Client) getBuildpackResponse(requestUrl string) (BuildpackResponse, error) { + var buildpackResp BuildpackResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return BuildpackResponse{}, errors.Wrap(err, "Error requesting buildpacks") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return BuildpackResponse{}, errors.Wrap(err, "Error reading buildpack request") + } + err = json.Unmarshal(resBody, &buildpackResp) + if err != nil { + return BuildpackResponse{}, errors.Wrap(err, "Error unmarshalling buildpack") + } + return buildpackResp, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/client.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/client.go index d84487b5..5b066dec 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/client.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/client.go @@ -4,18 +4,19 @@ import ( "bytes" "crypto/tls" "encoding/json" - "golang.org/x/net/context" - "golang.org/x/oauth2" "io" - "log" "net/http" "net/url" - "os" + + "github.com/pkg/errors" + "golang.org/x/net/context" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" ) //Client used to communicate with Cloud Foundry type Client struct { - config Config + Config Config Endpoint Endpoint } @@ 
-28,14 +29,16 @@ type Endpoint struct { //Config is used to configure the creation of a client type Config struct { - ApiAddress string - LoginAddress string - Username string - Password string - SkipSslValidation bool + ApiAddress string `json:"api_url"` + Username string `json:"user"` + Password string `json:"password"` + ClientID string `json:"client_id"` + ClientSecret string `json:"client_secret"` + SkipSslValidation bool `json:"skip_ssl_validation"` HttpClient *http.Client - Token string + Token string `json:"auth_token"` TokenSource oauth2.TokenSource + UserAgent string `json:"user_agent"` } // request is used to help build up a request @@ -52,13 +55,13 @@ type request struct { //Need to be remove in close future func DefaultConfig() *Config { return &Config{ - ApiAddress: "https://api.10.244.0.34.xip.io", - LoginAddress: "https://login.10.244.0.34.xip.io", + ApiAddress: "http://api.bosh-lite.com", Username: "admin", Password: "admin", Token: "", SkipSslValidation: false, HttpClient: http.DefaultClient, + UserAgent: "Go-CF-client/1.1", } } @@ -72,7 +75,7 @@ func DefaultEndpoint() *Endpoint { } // NewClient returns a new client -func NewClient(config *Config) *Client { +func NewClient(config *Config) (client *Client, err error) { // bootstrap the config defConfig := DefaultConfig() @@ -80,10 +83,6 @@ func NewClient(config *Config) *Client { config.ApiAddress = defConfig.ApiAddress } - if len(config.LoginAddress) == 0 { - config.LoginAddress = defConfig.LoginAddress - } - if len(config.Username) == 0 { config.Username = defConfig.Username } @@ -96,27 +95,69 @@ func NewClient(config *Config) *Client { config.Token = defConfig.Token } - ctx := oauth2.NoContext - if config.SkipSslValidation == false { - ctx = context.WithValue(ctx, oauth2.HTTPClient, defConfig.HttpClient) - } else { + if len(config.UserAgent) == 0 { + config.UserAgent = defConfig.UserAgent + } - tr := &http.Transport{ - TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, - } + if config.HttpClient == nil { + config.HttpClient = defConfig.HttpClient + } - ctx = context.WithValue(ctx, oauth2.HTTPClient, &http.Client{Transport: tr}) + if config.HttpClient.Transport == nil { + config.HttpClient.Transport = shallowDefaultTransport() + } + tp := config.HttpClient.Transport.(*http.Transport) + if tp.TLSClientConfig == nil { + tp.TLSClientConfig = &tls.Config{} } + // we want to keep the Timeout value from config.HttpClient + timeout := config.HttpClient.Timeout + + ctx := context.Background() + + tp.TLSClientConfig.InsecureSkipVerify = config.SkipSslValidation + ctx = context.WithValue(ctx, oauth2.HTTPClient, config.HttpClient) + endpoint, err := getInfo(config.ApiAddress, oauth2.NewClient(ctx, nil)) if err != nil { - log.Println("Could not get api /v2/info :", err) - os.Exit(1) + return nil, errors.Wrap(err, "Could not get api /v2/info") + } + switch { + case config.Token != "": + config = getUserTokenAuth(config, endpoint, ctx) + case config.ClientID != "": + config = getClientAuth(config, endpoint, ctx) + default: + config, err = getUserAuth(config, endpoint, ctx) + if err != nil { + return nil, err + } + } + // make sure original Timeout value will be used + if config.HttpClient.Timeout != timeout { + config.HttpClient.Timeout = timeout + } + client = &Client{ + Config: *config, + Endpoint: *endpoint, } + return client, nil +} +func shallowDefaultTransport() *http.Transport { + defaultTransport := http.DefaultTransport.(*http.Transport) + return &http.Transport{ + Proxy: defaultTransport.Proxy, + TLSHandshakeTimeout: 
defaultTransport.TLSHandshakeTimeout, + ExpectContinueTimeout: defaultTransport.ExpectContinueTimeout, + } +} + +func getUserAuth(config *Config, endpoint *Endpoint, ctx context.Context) (*Config, error) { authConfig := &oauth2.Config{ ClientID: "cf", Scopes: []string{""}, @@ -128,20 +169,50 @@ func NewClient(config *Config) *Client { token, err := authConfig.PasswordCredentialsToken(ctx, config.Username, config.Password) - if err != nil { - log.Printf("Error getting token %v\n", err) + return nil, errors.Wrap(err, "Error getting token") } config.TokenSource = authConfig.TokenSource(ctx, token) config.HttpClient = oauth2.NewClient(ctx, config.TokenSource) - client := &Client{ - config: *config, - Endpoint: *endpoint, + return config, err +} + +func getClientAuth(config *Config, endpoint *Endpoint, ctx context.Context) *Config { + authConfig := &clientcredentials.Config{ + ClientID: config.ClientID, + ClientSecret: config.ClientSecret, + TokenURL: endpoint.TokenEndpoint + "/oauth/token", + } + + config.TokenSource = authConfig.TokenSource(ctx) + config.HttpClient = authConfig.Client(ctx) + return config +} + +// Initialize client credentials from existing bearer token +func getUserTokenAuth(config *Config, endpoint *Endpoint, ctx context.Context) *Config { + authConfig := &oauth2.Config{ + ClientID: "cf", + Scopes: []string{""}, + Endpoint: oauth2.Endpoint{ + AuthURL: endpoint.AuthEndpoint + "/oauth/auth", + TokenURL: endpoint.TokenEndpoint + "/oauth/token", + }, } - return client + + // Token is expected to have no "bearer" prefix + token := &oauth2.Token{ + AccessToken: config.Token, + TokenType: "Bearer"} + + config.TokenSource = authConfig.TokenSource(ctx, token) + config.HttpClient = oauth2.NewClient(ctx, config.TokenSource) + + return config } + func getInfo(api string, httpClient *http.Client) (*Endpoint, error) { var endpoint Endpoint @@ -163,24 +234,52 @@ func getInfo(api string, httpClient *http.Client) (*Endpoint, error) { return &endpoint, err } -// newRequest is used to create a new request -func (c *Client) newRequest(method, path string) *request { +// NewRequest is used to create a new request +func (c *Client) NewRequest(method, path string) *request { r := &request{ method: method, - url: c.config.ApiAddress + path, + url: c.Config.ApiAddress + path, params: make(map[string][]string), } return r } -// doRequest runs a request with our client -func (c *Client) doRequest(r *request) (*http.Response, error) { +// NewRequestWithBody is used to create a new request with +// arbigtrary body io.Reader. 
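+//
+// A minimal usage sketch (illustrative only; `client` is an assumed,
+// already-initialized *Client and the path/payload are placeholders).
+// DoRequest sets the Content-type header to application/json whenever a
+// request body is present:
+//
+//	buf := bytes.NewBufferString(`{"username": "some-user"}`)
+//	r := client.NewRequestWithBody("PUT", "/v2/organizations/some-org-guid/users", buf)
+//	resp, err := client.DoRequest(r)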
+func (c *Client) NewRequestWithBody(method, path string, body io.Reader) *request { + r := c.NewRequest(method, path) + + // Set request body + r.body = body + + return r +} + +// DoRequest runs a request with our client +func (c *Client) DoRequest(r *request) (*http.Response, error) { req, err := r.toHTTP() if err != nil { return nil, err } - resp, err := c.config.HttpClient.Do(req) - return resp, err + req.Header.Set("User-Agent", c.Config.UserAgent) + if r.body != nil { + req.Header.Set("Content-type", "application/json") + } + + resp, err := c.Config.HttpClient.Do(req) + if err != nil { + return nil, err + } + + if resp.StatusCode >= http.StatusBadRequest { + var cfErr CloudFoundryError + if err := decodeBody(resp, &cfErr); err != nil { + return resp, errors.Wrap(err, "Unable to decode body") + } + return nil, cfErr + } + + return resp, nil } // toHTTP converts the request to an HTTP request @@ -188,11 +287,11 @@ func (r *request) toHTTP() (*http.Request, error) { // Check if we should encode the body if r.body == nil && r.obj != nil { - if b, err := encodeBody(r.obj); err != nil { + b, err := encodeBody(r.obj) + if err != nil { return nil, err - } else { - r.body = b } + r.body = b } // Create the HTTP request @@ -216,12 +315,10 @@ func encodeBody(obj interface{}) (io.Reader, error) { return buf, nil } -func (c *Client) GetToken() string { - token, err := c.config.TokenSource.Token() +func (c *Client) GetToken() (string, error) { + token, err := c.Config.TokenSource.Token() if err != nil { - log.Printf("Error getting token %v\n", err) - return "" + return "", errors.Wrap(err, "Error getting bearer token") } - - return "bearer " + token.AccessToken + return "bearer " + token.AccessToken, nil } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/domains.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/domains.go new file mode 100644 index 00000000..454f3580 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/domains.go @@ -0,0 +1,194 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type DomainsResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []DomainResource `json:"resources"` +} + +type SharedDomainsResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SharedDomainResource `json:"resources"` +} + +type DomainResource struct { + Meta Meta `json:"metadata"` + Entity Domain `json:"entity"` +} + +type SharedDomainResource struct { + Meta Meta `json:"metadata"` + Entity SharedDomain `json:"entity"` +} + +type Domain struct { + Guid string `json:"guid"` + Name string `json:"name"` + OwningOrganizationGuid string `json:"owning_organization_guid"` + OwningOrganizationUrl string `json:"owning_organization_url"` + SharedOrganizationsUrl string `json:"shared_organizations_url"` + c *Client +} + +type SharedDomain struct { + Guid string `json:"guid"` + Name string `json:"name"` + RouterGroupGuid string `json:"router_group_guid"` + RouterGroupType string `json:"router_group_type"` + c *Client +} + +func (c *Client) ListDomainsByQuery(query url.Values) ([]Domain, error) { + var domains []Domain + requestUrl := "/v2/private_domains?" 
+ query.Encode() + for { + var domainResp DomainsResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting domains") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading domains request") + } + + err = json.Unmarshal(resBody, &domainResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling domains") + } + for _, domain := range domainResp.Resources { + domain.Entity.Guid = domain.Meta.Guid + domain.Entity.c = c + domains = append(domains, domain.Entity) + } + requestUrl = domainResp.NextUrl + if requestUrl == "" { + break + } + } + return domains, nil +} + +func (c *Client) ListDomains() ([]Domain, error) { + return c.ListDomainsByQuery(nil) +} + +func (c *Client) ListSharedDomainsByQuery(query url.Values) ([]SharedDomain, error) { + var domains []SharedDomain + requestUrl := "/v2/shared_domains?" + query.Encode() + for { + var domainResp SharedDomainsResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting shared domains") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading shared domains request") + } + + err = json.Unmarshal(resBody, &domainResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling shared domains") + } + for _, domain := range domainResp.Resources { + domain.Entity.Guid = domain.Meta.Guid + domain.Entity.c = c + domains = append(domains, domain.Entity) + } + requestUrl = domainResp.NextUrl + if requestUrl == "" { + break + } + } + return domains, nil +} + +func (c *Client) ListSharedDomains() ([]SharedDomain, error) { + return c.ListSharedDomainsByQuery(nil) +} + +func (c *Client) GetDomainByName(name string) (Domain, error) { + q := url.Values{} + q.Set("q", "name:"+name) + domains, err := c.ListDomainsByQuery(q) + if err != nil { + return Domain{}, errors.Wrapf(err, "Error during domain lookup %s", name) + } + if len(domains) == 0 { + return Domain{}, errors.New(fmt.Sprintf("Unable to find domain %s", name)) + } + return domains[0], nil +} + +func (c *Client) GetSharedDomainByName(name string) (SharedDomain, error) { + q := url.Values{} + q.Set("q", "name:"+name) + domains, err := c.ListSharedDomainsByQuery(q) + if err != nil { + return SharedDomain{}, errors.Wrapf(err, "Error during shared domain lookup %s", name) + } + if len(domains) == 0 { + return SharedDomain{}, errors.New(fmt.Sprintf("Unable to find shared domain %s", name)) + } + return domains[0], nil +} + +func (c *Client) CreateDomain(name, orgGuid string) (*Domain, error) { + req := c.NewRequest("POST", "/v2/private_domains") + req.obj = map[string]interface{}{ + "name": name, + "owning_organization_guid": orgGuid, + } + resp, err := c.DoRequest(req) + if err != nil { + return nil, err + } + if resp.StatusCode != http.StatusCreated { + return nil, errors.Wrapf(err, "Error creating domain %s, response code: %d", name, resp.StatusCode) + } + return respBodyToDomain(resp.Body, c) +} + +func (c *Client) DeleteDomain(guid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/private_domains/%s", guid))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting domain %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func respBodyToDomain(body io.ReadCloser, c *Client) 
(*Domain, error) { + bodyRaw, err := ioutil.ReadAll(body) + if err != nil { + return nil, err + } + domainRes := DomainResource{} + err = json.Unmarshal([]byte(bodyRaw), &domainRes) + if err != nil { + return nil, err + } + domain := domainRes.Entity + domain.Guid = domainRes.Meta.Guid + domain.c = c + return &domain, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/error.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/error.go new file mode 100644 index 00000000..1ae2788e --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/error.go @@ -0,0 +1,27 @@ +package cfclient + +import "fmt" + +type CloudFoundryErrors struct { + Errors []CloudFoundryError `json:"errors"` +} + +func (cfErrs CloudFoundryErrors) Error() string { + err := "" + + for _, cfErr := range cfErrs.Errors { + err += fmt.Sprintf("%s\n", cfErr) + } + + return err +} + +type CloudFoundryError struct { + Code int `json:"code"` + ErrorCode string `json:"error_code"` + Description string `json:"description"` +} + +func (cfErr CloudFoundryError) Error() string { + return fmt.Sprintf("cfclient: error (%d): %s", cfErr.Code, cfErr.ErrorCode) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/isolationsegments.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/isolationsegments.go new file mode 100644 index 00000000..fa232569 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/isolationsegments.go @@ -0,0 +1,241 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/http" + "time" + + "github.com/pkg/errors" +) + +type IsolationSegment struct { + GUID string `json:"guid"` + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + c *Client +} + +type IsolationSegementResponse struct { + GUID string `json:"guid"` + Name string `json:"name"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + Links struct { + Self struct { + Href string `json:"href"` + } `json:"self"` + Spaces struct { + Href string `json:"href"` + } `json:"spaces"` + Organizations struct { + Href string `json:"href"` + } `json:"organizations"` + } `json:"links"` +} + +type Pagination struct { + TotalResults int `json:"total_results"` + TotalPages int `json:"total_pages"` + First struct { + Href string `json:"href"` + } `json:"first"` + Last struct { + Href string `json:"href"` + } `json:"last"` + Next string `json:"next"` + Previous string `json:"previous"` +} + +type ListIsolationSegmentsResponse struct { + Pagination Pagination `json:"pagination"` + Resources []IsolationSegementResponse `json:"resources"` +} + +func (c *Client) CreateIsolationSegment(name string) (*IsolationSegment, error) { + req := c.NewRequest("POST", "/v3/isolation_segments") + req.obj = map[string]interface{}{ + "name": name, + } + resp, err := c.DoRequest(req) + if err != nil { + return nil, errors.Wrap(err, "Error while creating isolation segment") + } + if resp.StatusCode != http.StatusCreated { + return nil, errors.New(fmt.Sprintf("Error creating isolation segment %s, response code: %d", name, resp.StatusCode)) + } + return respBodyToIsolationSegment(resp.Body, c) +} + +func respBodyToIsolationSegment(body io.ReadCloser, c *Client) (*IsolationSegment, error) { + bodyRaw, err := 
ioutil.ReadAll(body) + if err != nil { + return nil, err + } + isr := IsolationSegementResponse{} + err = json.Unmarshal([]byte(bodyRaw), &isr) + if err != nil { + return nil, err + } + + return &IsolationSegment{ + GUID: isr.GUID, + Name: isr.Name, + CreatedAt: isr.CreatedAt, + UpdatedAt: isr.UpdatedAt, + c: c, + }, nil +} + +func (c *Client) GetIsolationSegmentByGUID(guid string) (*IsolationSegment, error) { + var isr IsolationSegementResponse + r := c.NewRequest("GET", "/v3/isolation_segments/"+guid) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting isolation segment by GUID") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading isolation segment response body") + } + + err = json.Unmarshal(resBody, &isr) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling isolation segment response") + } + return &IsolationSegment{Name: isr.Name, GUID: isr.GUID, CreatedAt: isr.CreatedAt, UpdatedAt: isr.UpdatedAt, c: c}, nil +} + +func (c *Client) ListIsolationSegments() ([]IsolationSegment, error) { + var iss []IsolationSegment + requestUrl := "/v3/isolation_segments" + for { + var isr ListIsolationSegmentsResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting isolation segments") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading isolation segment request") + } + + err = json.Unmarshal(resBody, &isr) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshalling isolation segment") + } + + for _, is := range isr.Resources { + iss = append(iss, IsolationSegment{ + Name: is.Name, + GUID: is.GUID, + CreatedAt: is.CreatedAt, + UpdatedAt: is.UpdatedAt, + c: c, + }) + } + + requestUrl = isr.Pagination.Next + if requestUrl == "" { + break + } + } + return iss, nil +} + +// TODO listOrgsForIsolationSegments +// TODO listSpacesForIsolationSegments +// TODO setDefaultIsolationSegmentForOrg + +func (c *Client) DeleteIsolationSegmentByGUID(guid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v3/isolation_segments/%s", guid))) + if err != nil { + return errors.Wrap(err, "Error during sending DELETE request for isolation segments") + } + if resp.StatusCode != http.StatusNoContent { + return errors.New(fmt.Sprintf("Error deleting isolation segment %s, response code: %d", guid, resp.StatusCode)) + } + return nil +} + +func (i *IsolationSegment) Delete() error { + return i.c.DeleteIsolationSegmentByGUID(i.GUID) +} + +func (i *IsolationSegment) AddOrg(orgGuid string) error { + if i == nil || i.c == nil { + return errors.New("No communication handle.") + } + req := i.c.NewRequest("POST", fmt.Sprintf("/v3/isolation_segments/%s/relationships/organizations", i.GUID)) + type Entry struct { + GUID string `json:"guid"` + } + req.obj = map[string]interface{}{ + "data": []Entry{Entry{GUID: orgGuid}}, + } + resp, err := i.c.DoRequest(req) + if err != nil { + return errors.Wrap(err, "Error during adding org to isolation segment") + } + if resp.StatusCode != http.StatusCreated { + return errors.New(fmt.Sprintf("Error adding org %s to isolation segment %s, response code: %d", orgGuid, i.Name, resp.StatusCode)) + } + return nil +} + +func (i *IsolationSegment) RemoveOrg(orgGuid string) error { + if i == nil || i.c == nil { + return errors.New("No communication handle.") + } 
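+	// The GUID of the org to detach is carried in the body of the DELETE
+	// request built below, rather than in the URL path.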
+ req := i.c.NewRequest("DELETE", fmt.Sprintf("/v3/isolation_segments/%s/relationships/organizations", i.GUID)) + req.obj = map[string]interface{}{ + "guid": orgGuid, + } + resp, err := i.c.DoRequest(req) + if err != nil { + return errors.Wrapf(err, "Error during removing org %s in isolation segment %s", orgGuid, i.Name) + } + if resp.StatusCode != http.StatusNoContent { + return errors.New(fmt.Sprintf("Error deleting org %s in isolation segment %s, response code: %d", orgGuid, i.Name, resp.StatusCode)) + } + return nil +} + +func (i *IsolationSegment) AddSpace(spaceGuid string) error { + if i == nil || i.c == nil { + return errors.New("No communication handle.") + } + req := i.c.NewRequest("PUT", fmt.Sprintf("/v2/spaces/%s", spaceGuid)) + req.obj = map[string]interface{}{ + "isolation_segment_guid": i.GUID, + } + resp, err := i.c.DoRequest(req) + if err != nil { + return errors.Wrapf(err, "Error during adding space %s to isolation segment %s", spaceGuid, i.Name) + } + if resp.StatusCode != http.StatusCreated { + return errors.New(fmt.Sprintf("Error adding space to isolation segment %s, response code: %d", i.Name, resp.StatusCode)) + } + return nil +} + +func (i *IsolationSegment) RemoveSpace(spaceGuid string) error { + if i == nil || i.c == nil { + return errors.New("No communication handle.") + } + req := i.c.NewRequest("DELETE", fmt.Sprintf("/v2/spaces/%s/isolation_segment", spaceGuid)) + resp, err := i.c.DoRequest(req) + if err != nil { + return errors.Wrapf(err, "Error during deleting space %s in isolation segment %s", spaceGuid, i.Name) + } + if resp.StatusCode != http.StatusNoContent { + return errors.New(fmt.Sprintf("Error deleting space %s from isolation segment %s, response code: %d", spaceGuid, i.Name, resp.StatusCode)) + } + return nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/org_quotas.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/org_quotas.go new file mode 100644 index 00000000..b879e5ab --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/org_quotas.go @@ -0,0 +1,96 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type OrgQuotasResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []OrgQuotasResource `json:"resources"` +} + +type OrgQuotasResource struct { + Meta Meta `json:"metadata"` + Entity OrgQuota `json:"entity"` +} + +type OrgQuota struct { + Guid string `json:"guid"` + Name string `json:"name"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` + TotalServices int `json:"total_services"` + TotalRoutes int `json:"total_routes"` + TotalPrivateDomains int `json:"total_private_domains"` + MemoryLimit int `json:"memory_limit"` + TrialDBAllowed bool `json:"trial_db_allowed"` + InstanceMemoryLimit int `json:"instance_memory_limit"` + AppInstanceLimit int `json:"app_instance_limit"` + AppTaskLimit int `json:"app_task_limit"` + TotalServiceKeys int `json:"total_service_keys"` + TotalReservedRoutePorts int `json:"total_reserved_route_ports"` + c *Client +} + +func (c *Client) ListOrgQuotasByQuery(query url.Values) ([]OrgQuota, error) { + var orgQuotas []OrgQuota + requestUrl := "/v2/quota_definitions?" 
+ query.Encode() + for { + orgQuotasResp, err := c.getOrgQuotasResponse(requestUrl) + if err != nil { + return []OrgQuota{}, err + } + for _, org := range orgQuotasResp.Resources { + org.Entity.Guid = org.Meta.Guid + org.Entity.c = c + orgQuotas = append(orgQuotas, org.Entity) + } + requestUrl = orgQuotasResp.NextUrl + if requestUrl == "" { + break + } + } + return orgQuotas, nil +} + +func (c *Client) ListOrgQuotas() ([]OrgQuota, error) { + return c.ListOrgQuotasByQuery(nil) +} + +func (c *Client) GetOrgQuotaByName(name string) (OrgQuota, error) { + q := url.Values{} + q.Set("q", "name:"+name) + orgQuotas, err := c.ListOrgQuotasByQuery(q) + if err != nil { + return OrgQuota{}, err + } + if len(orgQuotas) != 1 { + return OrgQuota{}, fmt.Errorf("Unable to find org quota " + name) + } + return orgQuotas[0], nil +} + +func (c *Client) getOrgQuotasResponse(requestUrl string) (OrgQuotasResponse, error) { + var orgQuotasResp OrgQuotasResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return OrgQuotasResponse{}, errors.Wrap(err, "Error requesting org quotas") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return OrgQuotasResponse{}, errors.Wrap(err, "Error reading org quotas body") + } + err = json.Unmarshal(resBody, &orgQuotasResp) + if err != nil { + return OrgQuotasResponse{}, errors.Wrap(err, "Error unmarshalling org quotas") + } + return orgQuotasResp, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/orgs.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/orgs.go index 8b12cb84..723841d0 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/orgs.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/orgs.go @@ -1,15 +1,21 @@ package cfclient import ( + "bytes" "encoding/json" "fmt" "io/ioutil" "log" + "net/http" + "net/url" + + "github.com/pkg/errors" ) type OrgResponse struct { Count int `json:"total_results"` Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` Resources []OrgResource `json:"resources"` } @@ -19,44 +25,102 @@ type OrgResource struct { } type Org struct { - Guid string `json:"guid"` - Name string `json:"name"` - c *Client + Guid string `json:"guid"` + Name string `json:"name"` + QuotaDefinitionGuid string `json:"quota_definition_guid"` + c *Client +} + +type OrgSummary struct { + Guid string `json:"guid"` + Name string `json:"name"` + Status string `json:"status"` + Spaces []OrgSummarySpaces `json:"spaces"` } -func (c *Client) ListOrgs() []Org { +type OrgSummarySpaces struct { + Guid string `json:"guid"` + Name string `json:"name"` + ServiceCount int `json:"service_count"` + AppCount int `json:"app_count"` + MemDevTotal int `json:"mem_dev_total"` + MemProdTotal int `json:"mem_prod_total"` +} + +type OrgRequest struct { + Name string `json:"name"` + Status string `json:"status,omitempty"` + QuotaDefinitionGuid string `json:"quota_definition_guid,omitempty"` +} + +func (c *Client) ListOrgsByQuery(query url.Values) ([]Org, error) { var orgs []Org - var orgResp OrgResponse - r := c.newRequest("GET", "/v2/organizations") - resp, err := c.doRequest(r) - if err != nil { - log.Printf("Error requesting organizations %v", err) + requestUrl := "/v2/organizations?" 
+ query.Encode() + for { + orgResp, err := c.getOrgResponse(requestUrl) + if err != nil { + return []Org{}, err + } + for _, org := range orgResp.Resources { + org.Entity.Guid = org.Meta.Guid + org.Entity.c = c + orgs = append(orgs, org.Entity) + } + requestUrl = orgResp.NextUrl + if requestUrl == "" { + break + } } - resBody, err := ioutil.ReadAll(resp.Body) + return orgs, nil +} + +func (c *Client) ListOrgs() ([]Org, error) { + return c.ListOrgsByQuery(nil) +} + +func (c *Client) GetOrgByName(name string) (Org, error) { + var org Org + q := url.Values{} + q.Set("q", "name:"+name) + orgs, err := c.ListOrgsByQuery(q) if err != nil { - log.Printf("Error reading organization request %v", resBody) + return org, err } + if len(orgs) == 0 { + return org, fmt.Errorf("Unable to find org %s", name) + } + return orgs[0], nil +} - err = json.Unmarshal(resBody, &orgResp) +func (c *Client) GetOrgByGuid(guid string) (Org, error) { + var orgRes OrgResource + r := c.NewRequest("GET", "/v2/organizations/"+guid) + resp, err := c.DoRequest(r) if err != nil { - log.Printf("Error unmarshalling organization %v", err) + return Org{}, err } - for _, org := range orgResp.Resources { - org.Entity.Guid = org.Meta.Guid - org.Entity.c = c - orgs = append(orgs, org.Entity) + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return Org{}, err } - return orgs + err = json.Unmarshal(body, &orgRes) + if err != nil { + return Org{}, err + } + orgRes.Entity.Guid = orgRes.Meta.Guid + orgRes.Entity.c = c + return orgRes.Entity, nil } -func (c *Client) OrgSpaces(guid string) []Space { +func (c *Client) OrgSpaces(guid string) ([]Space, error) { var spaces []Space var spaceResp SpaceResponse path := fmt.Sprintf("/v2/organizations/%s/spaces", guid) - r := c.newRequest("GET", path) - resp, err := c.doRequest(r) + r := c.NewRequest("GET", path) + resp, err := c.DoRequest(r) if err != nil { - log.Printf("Error requesting space %v", err) + return nil, errors.Wrap(err, "Error requesting space") } resBody, err := ioutil.ReadAll(resp.Body) if err != nil { @@ -65,13 +129,390 @@ func (c *Client) OrgSpaces(guid string) []Space { err = json.Unmarshal(resBody, &spaceResp) if err != nil { - log.Printf("Error space organization %v", err) + return nil, errors.Wrap(err, "Error space organization") } for _, space := range spaceResp.Resources { space.Entity.Guid = space.Meta.Guid + space.Entity.c = c spaces = append(spaces, space.Entity) } - return spaces + return spaces, nil +} + +func (o *Org) Summary() (OrgSummary, error) { + var orgSummary OrgSummary + requestUrl := fmt.Sprintf("/v2/organizations/%s/summary", o.Guid) + r := o.c.NewRequest("GET", requestUrl) + resp, err := o.c.DoRequest(r) + if err != nil { + return OrgSummary{}, errors.Wrap(err, "Error requesting org summary") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return OrgSummary{}, errors.Wrap(err, "Error reading org summary body") + } + err = json.Unmarshal(resBody, &orgSummary) + if err != nil { + return OrgSummary{}, errors.Wrap(err, "Error unmarshalling org summary") + } + return orgSummary, nil +} + +func (o *Org) Quota() (*OrgQuota, error) { + var orgQuota *OrgQuota + var orgQuotaResource OrgQuotasResource + if o.QuotaDefinitionGuid == "" { + return nil, nil + } + requestUrl := fmt.Sprintf("/v2/quota_definitions/%s", o.QuotaDefinitionGuid) + r := o.c.NewRequest("GET", requestUrl) + resp, err := o.c.DoRequest(r) + if err != nil { + return &OrgQuota{}, errors.Wrap(err, "Error requesting org 
quota") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return &OrgQuota{}, errors.Wrap(err, "Error reading org quota body") + } + err = json.Unmarshal(resBody, &orgQuotaResource) + if err != nil { + return &OrgQuota{}, errors.Wrap(err, "Error unmarshalling org quota") + } + orgQuota = &orgQuotaResource.Entity + orgQuota.Guid = orgQuotaResource.Meta.Guid + orgQuota.c = o.c + return orgQuota, nil +} + +func (c *Client) AssociateOrgManager(orgGUID, userGUID string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateManager(userGUID) +} + +func (c *Client) AssociateOrgManagerByUsername(orgGUID, name string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateManagerByUsername(name) +} + +func (c *Client) AssociateOrgUser(orgGUID, userGUID string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateUser(userGUID) +} + +func (c *Client) AssociateOrgAuditor(orgGUID, userGUID string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateAuditor(userGUID) +} + +func (c *Client) AssociateOrgUserByUsername(orgGUID, name string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateUserByUsername(name) +} + +func (c *Client) AssociateOrgAuditorByUsername(orgGUID, name string) (Org, error) { + org := Org{Guid: orgGUID, c: c} + return org.AssociateAuditorByUsername(name) +} + +func (c *Client) RemoveOrgManager(orgGUID, userGUID string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveManager(userGUID) +} + +func (c *Client) RemoveOrgManagerByUsername(orgGUID, name string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveManagerByUsername(name) +} + +func (c *Client) RemoveOrgUser(orgGUID, userGUID string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveUser(userGUID) +} + +func (c *Client) RemoveOrgAuditor(orgGUID, userGUID string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveAuditor(userGUID) +} + +func (c *Client) RemoveOrgUserByUsername(orgGUID, name string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveUserByUsername(name) +} + +func (c *Client) RemoveOrgAuditorByUsername(orgGUID, name string) error { + org := Org{Guid: orgGUID, c: c} + return org.RemoveAuditorByUsername(name) +} + +func (o *Org) AssociateManager(userGUID string) (Org, error) { + requestUrl := fmt.Sprintf("/v2/organizations/%s/managers/%s", o.Guid, userGUID) + r := o.c.NewRequest("PUT", requestUrl) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating manager %s, response code: %d", userGUID, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} + +func (o *Org) AssociateManagerByUsername(name string) (Org, error) { + requestUrl := fmt.Sprintf("/v2/organizations/%s/managers", o.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return Org{}, err + } + r := o.c.NewRequestWithBody("PUT", requestUrl, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating manager %s, response code: %d", name, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} + +func (o *Org) AssociateUser(userGUID string) (Org, error) { + requestUrl := fmt.Sprintf("/v2/organizations/%s/users/%s", o.Guid, userGUID) + r := o.c.NewRequest("PUT", 
requestUrl) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating user %s, response code: %d", userGUID, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} +func (o *Org) AssociateAuditor(userGUID string) (Org, error) { + requestUrl := fmt.Sprintf("/v2/organizations/%s/auditors/%s", o.Guid, userGUID) + r := o.c.NewRequest("PUT", requestUrl) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating auditor %s, response code: %d", userGUID, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} + +func (o *Org) AssociateUserByUsername(name string) (Org, error) { + requestUrl := fmt.Sprintf("/v2/organizations/%s/users", o.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return Org{}, err + } + r := o.c.NewRequestWithBody("PUT", requestUrl, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating user %s, response code: %d", name, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} + +func (o *Org) AssociateAuditorByUsername(name string) (Org, error) { + requestUrl := fmt.Sprintf("/v2/organizations/%s/auditors", o.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return Org{}, err + } + r := o.c.NewRequestWithBody("PUT", requestUrl, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error associating auditor %s, response code: %d", name, resp.StatusCode) + } + return o.c.handleOrgResp(resp) +} + +func (o *Org) RemoveManager(userGUID string) error { + requestUrl := fmt.Sprintf("/v2/organizations/%s/managers/%s", o.Guid, userGUID) + r := o.c.NewRequest("DELETE", requestUrl) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing manager %s, response code: %d", userGUID, resp.StatusCode) + } + return nil +} + +func (o *Org) RemoveManagerByUsername(name string) error { + requestUrl := fmt.Sprintf("/v2/organizations/%s/managers", o.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return err + } + r := o.c.NewRequestWithBody("DELETE", requestUrl, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing manager %s, response code: %d", name, resp.StatusCode) + } + return nil +} + +func (o *Org) RemoveUser(userGUID string) error { + requestUrl := fmt.Sprintf("/v2/organizations/%s/users/%s", o.Guid, userGUID) + r := o.c.NewRequest("DELETE", requestUrl) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing user %s, response code: %d", userGUID, resp.StatusCode) + } + return nil +} + +func (o *Org) RemoveAuditor(userGUID string) error { + requestUrl := fmt.Sprintf("/v2/organizations/%s/auditors/%s", o.Guid, userGUID) + r := o.c.NewRequest("DELETE", requestUrl) + resp, err := o.c.DoRequest(r) + if err 
!= nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing auditor %s, response code: %d", userGUID, resp.StatusCode) + } + return nil +} + +func (o *Org) RemoveUserByUsername(name string) error { + requestUrl := fmt.Sprintf("/v2/organizations/%s/users", o.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return err + } + r := o.c.NewRequestWithBody("DELETE", requestUrl, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing user %s, response code: %d", name, resp.StatusCode) + } + return nil +} + +func (o *Org) RemoveAuditorByUsername(name string) error { + requestUrl := fmt.Sprintf("/v2/organizations/%s/auditors", o.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return err + } + r := o.c.NewRequestWithBody("DELETE", requestUrl, buf) + resp, err := o.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error removing auditor %s, response code: %d", name, resp.StatusCode) + } + return nil +} + +func (c *Client) CreateOrg(req OrgRequest) (Org, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return Org{}, err + } + r := c.NewRequestWithBody("POST", "/v2/organizations", buf) + resp, err := c.DoRequest(r) + if err != nil { + return Org{}, err + } + if resp.StatusCode != http.StatusCreated { + return Org{}, errors.Wrapf(err, "Error creating organization, response code: %d", resp.StatusCode) + } + return c.handleOrgResp(resp) +} + +func (c *Client) DeleteOrg(guid string, recursive bool) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/organizations/%s?recursive=%t", guid, recursive))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting organization %s, response code: %d", guid, resp.StatusCode) + } + return nil +} + +func (c *Client) getOrgResponse(requestUrl string) (OrgResponse, error) { + var orgResp OrgResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return OrgResponse{}, errors.Wrap(err, "Error requesting orgs") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return OrgResponse{}, errors.Wrap(err, "Error reading org request") + } + err = json.Unmarshal(resBody, &orgResp) + if err != nil { + return OrgResponse{}, errors.Wrap(err, "Error unmarshalling org") + } + return orgResp, nil +} + +func (c *Client) fetchOrgs(requestUrl string) ([]Org, error) { + var orgs []Org + for { + orgResp, err := c.getOrgResponse(requestUrl) + if err != nil { + return []Org{}, err + } + for _, org := range orgResp.Resources { + org.Entity.Guid = org.Meta.Guid + org.Entity.c = c + orgs = append(orgs, org.Entity) + } + requestUrl = orgResp.NextUrl + if requestUrl == "" { + break + } + } + return orgs, nil +} + +func (c *Client) handleOrgResp(resp *http.Response) (Org, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return Org{}, err + } + var orgResource OrgResource + err = json.Unmarshal(body, &orgResource) + if err != nil { + return Org{}, err + } + org := orgResource.Entity + org.Guid = orgResource.Meta.Guid + org.c = c + return 
org, nil } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/routes.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/routes.go new file mode 100644 index 00000000..0b8b0560 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/routes.go @@ -0,0 +1,117 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type RoutesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []RoutesResource `json:"resources"` +} + +type RoutesResource struct { + Meta Meta `json:"metadata"` + Entity Route `json:"entity"` +} + +type RouteRequest struct { + DomainGuid string `json:"domain_guid"` + SpaceGuid string `json:"space_guid"` +} + +type Route struct { + Guid string `json:"guid"` + Host string `json:"host"` + Path string `json:"path"` + DomainGuid string `json:"domain_guid"` + SpaceGuid string `json:"space_guid"` + ServiceInstanceGuid string `json:"service_instance_guid"` + Port int `json:"port"` + c *Client +} + +func (c *Client) CreateTcpRoute(routeRequest RouteRequest) (Route, error) { + routesResource, err := c.createRoute("/v2/routes?generate_port=true", routeRequest) + if nil != err { + return Route{}, err + } + return routesResource.Entity, nil +} + +func (c *Client) ListRoutesByQuery(query url.Values) ([]Route, error) { + return c.fetchRoutes("/v2/routes?" + query.Encode()) +} + +func (c *Client) fetchRoutes(requestUrl string) ([]Route, error) { + var routes []Route + for { + routesResp, err := c.getRoutesResponse(requestUrl) + if err != nil { + return []Route{}, err + } + for _, route := range routesResp.Resources { + route.Entity.Guid = route.Meta.Guid + route.Entity.c = c + routes = append(routes, route.Entity) + } + requestUrl = routesResp.NextUrl + if requestUrl == "" { + break + } + } + return routes, nil +} + +func (c *Client) ListRoutes() ([]Route, error) { + return c.ListRoutesByQuery(nil) +} + +func (c *Client) getRoutesResponse(requestUrl string) (RoutesResponse, error) { + var routesResp RoutesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return RoutesResponse{}, errors.Wrap(err, "Error requesting routes") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return RoutesResponse{}, errors.Wrap(err, "Error reading routes body") + } + err = json.Unmarshal(resBody, &routesResp) + if err != nil { + return RoutesResponse{}, errors.Wrap(err, "Error unmarshalling routes") + } + return routesResp, nil +} + +func (c *Client) createRoute(requestUrl string, routeRequest RouteRequest) (RoutesResource, error) { + var routeResp RoutesResource + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(routeRequest) + if err != nil { + return RoutesResource{}, errors.Wrap(err, "Error creating route - failed to serialize request body") + } + r := c.NewRequestWithBody("POST", requestUrl, buf) + resp, err := c.DoRequest(r) + if err != nil { + return RoutesResource{}, errors.Wrap(err, "Error creating route") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return RoutesResource{}, errors.Wrap(err, "Error creating route") + } + err = json.Unmarshal(resBody, &routeResp) + if err != nil { + return RoutesResource{}, errors.Wrap(err, "Error unmarshalling routes") + } + return 
routeResp, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/secgroups.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/secgroups.go new file mode 100644 index 00000000..33a04a4c --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/secgroups.go @@ -0,0 +1,369 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io" + "io/ioutil" + "reflect" + "strings" + + "github.com/pkg/errors" +) + +type SecGroupResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SecGroupResource `json:"resources"` +} + +type SecGroupCreateResponse struct { + Code int `json:"code"` + ErrorCode string `json:"error_code"` + Description string `json:"description"` +} + +type SecGroupResource struct { + Meta Meta `json:"metadata"` + Entity SecGroup `json:"entity"` +} + +type SecGroup struct { + Guid string `json:"guid"` + Name string `json:"name"` + Rules []SecGroupRule `json:"rules"` + Running bool `json:"running_default"` + Staging bool `json:"staging_default"` + SpacesURL string `json:"spaces_url"` + SpacesData []SpaceResource `json:"spaces"` + c *Client +} + +type SecGroupRule struct { + Protocol string `json:"protocol"` + Ports string `json:"ports,omitempty"` //e.g. "4000-5000,9142" + Destination string `json:"destination"` //CIDR Format + Description string `json:"description,omitempty"` //Optional description + Code int `json:"code"` // ICMP code + Type int `json:"type"` //ICMP type. Only valid if Protocol=="icmp" + Log bool `json:"log,omitempty"` //If true, log this rule +} + +func (c *Client) ListSecGroups() (secGroups []SecGroup, err error) { + requestURL := "/v2/security_groups?inline-relations-depth=1" + for requestURL != "" { + var secGroupResp SecGroupResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + + if err != nil { + return nil, errors.Wrap(err, "Error requesting sec groups") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading sec group response body") + } + + err = json.Unmarshal(resBody, &secGroupResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling sec group") + } + + for _, secGroup := range secGroupResp.Resources { + secGroup.Entity.Guid = secGroup.Meta.Guid + secGroup.Entity.c = c + for i, space := range secGroup.Entity.SpacesData { + space.Entity.Guid = space.Meta.Guid + secGroup.Entity.SpacesData[i] = space + } + if len(secGroup.Entity.SpacesData) == 0 { + spaces, err := secGroup.Entity.ListSpaceResources() + if err != nil { + return nil, err + } + for _, space := range spaces { + secGroup.Entity.SpacesData = append(secGroup.Entity.SpacesData, space) + } + } + secGroups = append(secGroups, secGroup.Entity) + } + + requestURL = secGroupResp.NextUrl + resp.Body.Close() + } + return secGroups, nil +} + +func (c *Client) GetSecGroupByName(name string) (secGroup SecGroup, err error) { + requestURL := "/v2/security_groups?q=name:" + name + var secGroupResp SecGroupResponse + r := c.NewRequest("GET", requestURL) + resp, err := c.DoRequest(r) + + if err != nil { + return secGroup, errors.Wrap(err, "Error requesting sec groups") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return secGroup, errors.Wrap(err, "Error reading sec group response body") + } + + err = json.Unmarshal(resBody, &secGroupResp) + if err != nil { + return secGroup, 
errors.Wrap(err, "Error unmarshaling sec group") + } + if len(secGroupResp.Resources) == 0 { + return secGroup, fmt.Errorf("No security group with name %v found", name) + } + secGroup = secGroupResp.Resources[0].Entity + secGroup.Guid = secGroupResp.Resources[0].Meta.Guid + secGroup.c = c + + resp.Body.Close() + return secGroup, nil +} + +func (secGroup *SecGroup) ListSpaceResources() ([]SpaceResource, error) { + var spaceResources []SpaceResource + requestURL := secGroup.SpacesURL + for requestURL != "" { + spaceResp, err := secGroup.c.getSpaceResponse(requestURL) + if err != nil { + return []SpaceResource{}, err + } + for i, spaceRes := range spaceResp.Resources { + spaceRes.Entity.Guid = spaceRes.Meta.Guid + spaceResp.Resources[i] = spaceRes + } + spaceResources = append(spaceResources, spaceResp.Resources...) + requestURL = spaceResp.NextUrl + } + return spaceResources, nil +} + +/* +CreateSecGroup contacts the CF endpoint for creating a new security group. +name: the name to give to the created security group +rules: A slice of rule objects that describe the rules that this security group enforces. + This can technically be nil or an empty slice - we won't judge you +spaceGuids: The security group will be associated with the spaces specified by the contents of this slice. + If nil, the security group will not be associated with any spaces initially. +*/ +func (c *Client) CreateSecGroup(name string, rules []SecGroupRule, spaceGuids []string) (*SecGroup, error) { + return c.secGroupCreateHelper("/v2/security_groups", "POST", name, rules, spaceGuids) +} + +/* +UpdateSecGroup contacts the CF endpoint to update an existing security group. +guid: identifies the security group that you would like to update. +name: the new name to give to the security group +rules: A slice of rule objects that describe the rules that this security group enforces. + If this is left nil, the rules will not be changed. +spaceGuids: The security group will be associated with the spaces specified by the contents of this slice. + If nil, the space associations will not be changed. +*/ +func (c *Client) UpdateSecGroup(guid, name string, rules []SecGroupRule, spaceGuids []string) (*SecGroup, error) { + return c.secGroupCreateHelper("/v2/security_groups/"+guid, "PUT", name, rules, spaceGuids) +} + +/* +DeleteSecGroup contacts the CF endpoint to delete an existing security group. +guid: Indentifies the security group to be deleted. +*/ +func (c *Client) DeleteSecGroup(guid string) error { + //Perform the DELETE and check for errors + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/security_groups/%s", guid))) + if err != nil { + return err + } + if resp.StatusCode != 204 { //204 No Content + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +GetSecGroup contacts the CF endpoint for fetching the info for a particular security group. 
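+
+A minimal usage sketch (illustrative only; `client` is an assumed,
+already-initialized *Client and the GUID is a placeholder):
+
+	group, err := client.GetSecGroup("security-group-guid")
+	if err != nil {
+		// the request failed or the API returned a non-200 status
+	}
+	fmt.Println(group.Name, len(group.Rules))
+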
+guid: Identifies the security group to fetch information from +*/ +func (c *Client) GetSecGroup(guid string) (*SecGroup, error) { + //Perform the GET and check for errors + resp, err := c.DoRequest(c.NewRequest("GET", "/v2/security_groups/"+guid)) + if err != nil { + return nil, err + } + if resp.StatusCode != 200 { + return nil, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + //get the json out of the response body + return respBodyToSecGroup(resp.Body, c) +} + +/* +BindSecGroup contacts the CF endpoint to associate a space with a security group +secGUID: identifies the security group to add a space to +spaceGUID: identifies the space to associate +*/ +func (c *Client) BindSecGroup(secGUID, spaceGUID string) error { + //Perform the PUT and check for errors + resp, err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/security_groups/%s/spaces/%s", secGUID, spaceGUID))) + if err != nil { + return err + } + if resp.StatusCode != 201 { //201 Created + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +BindRunningSecGroup contacts the CF endpoint to associate a security group +secGUID: identifies the security group to add a space to +*/ +func (c *Client) BindRunningSecGroup(secGUID string) error { + //Perform the PUT and check for errors + resp, err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/config/running_security_groups/%s", secGUID))) + if err != nil { + return err + } + if resp.StatusCode != 200 { //200 + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +BindStagingSecGroup contacts the CF endpoint to associate a space with a security group +secGUID: identifies the security group to add a space to +*/ +func (c *Client) BindStagingSecGroup(secGUID string) error { + //Perform the PUT and check for errors + resp, err := c.DoRequest(c.NewRequest("PUT", fmt.Sprintf("/v2/config/staging_security_groups/%s", secGUID))) + if err != nil { + return err + } + if resp.StatusCode != 200 { //200 + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +/* +UnbindSecGroup contacts the CF endpoint to dissociate a space from a security group +secGUID: identifies the security group to remove a space from +spaceGUID: identifies the space to dissociate from the security group +*/ +func (c *Client) UnbindSecGroup(secGUID, spaceGUID string) error { + //Perform the DELETE and check for errors + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/security_groups/%s/spaces/%s", secGUID, spaceGUID))) + if err != nil { + return err + } + if resp.StatusCode != 204 { //204 No Content + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +//Reads most security group response bodies into a SecGroup object +func respBodyToSecGroup(body io.ReadCloser, c *Client) (*SecGroup, error) { + //get the json from the response body + bodyRaw, err := ioutil.ReadAll(body) + if err != nil { + return nil, errors.Wrap(err, "Could not read response body") + } + jStruct := SecGroupResource{} + //make it a SecGroup + err = json.Unmarshal([]byte(bodyRaw), &jStruct) + if err != nil { + return nil, errors.Wrap(err, "Could not unmarshal response body as json") + } + //pull a few extra fields from other places + ret := jStruct.Entity + ret.Guid = jStruct.Meta.Guid + ret.c = c + return &ret, nil +} + +func ConvertStructToMap(st interface{}) map[string]interface{} { + + reqRules := make(map[string]interface{}) + + 
v := reflect.ValueOf(st) + t := reflect.TypeOf(st) + + for i := 0; i < v.NumField(); i++ { + key := strings.ToLower(t.Field(i).Name) + typ := v.FieldByName(t.Field(i).Name).Kind().String() + structTag := t.Field(i).Tag.Get("json") + jsonName := strings.TrimSpace(strings.Split(structTag, ",")[0]) + value := v.FieldByName(t.Field(i).Name) + + // if jsonName is not empty use it for the key + if jsonName != "" { + key = jsonName + } + + if typ == "string" { + if !(value.String() == "" && strings.Contains(structTag, "omitempty")) { + reqRules[key] = value.String() + } + } else if typ == "int" { + reqRules[key] = value.Int() + } else { + reqRules[key] = value.Interface() + } + + } + + return reqRules +} + +//Create and Update secGroup pretty much do the same thing, so this function abstracts those out. +func (c *Client) secGroupCreateHelper(url, method, name string, rules []SecGroupRule, spaceGuids []string) (*SecGroup, error) { + reqRules := make([]map[string]interface{}, len(rules)) + + for i, rule := range rules { + reqRules[i] = ConvertStructToMap(rule) + protocol := strings.ToLower(reqRules[i]["protocol"].(string)) + + // if not icmp protocol need to remove the Code/Type fields + if protocol != "icmp" { + delete(reqRules[i], "code") + delete(reqRules[i], "type") + } + } + + req := c.NewRequest(method, url) + //set up request body + req.obj = map[string]interface{}{ + "name": name, + "rules": reqRules, + "space_guids": spaceGuids, + } + //fire off the request and check for problems + resp, err := c.DoRequest(req) + if err != nil { + return nil, err + } + if resp.StatusCode != 201 { // Both create and update should give 201 CREATED + var response SecGroupCreateResponse + + bodyRaw, _ := ioutil.ReadAll(resp.Body) + + err = json.Unmarshal(bodyRaw, &response) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling response") + } + + return nil, fmt.Errorf(`Request failed CF API returned with status code %d +------------------------------- +Error Code %s +Code %d +Description %s`, + resp.StatusCode, response.ErrorCode, response.Code, response.Description) + } + //get the json from the response body + return respBodyToSecGroup(resp.Body, c) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_bindings.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_bindings.go new file mode 100644 index 00000000..22798860 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_bindings.go @@ -0,0 +1,104 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type ServiceBindingsResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + Resources []ServiceBindingResource `json:"resources"` + NextUrl string `json:"next_url"` +} + +type ServiceBindingResource struct { + Meta Meta `json:"metadata"` + Entity ServiceBinding `json:"entity"` +} + +type ServiceBinding struct { + Guid string `json:"guid"` + AppGuid string `json:"app_guid"` + ServiceInstanceGuid string `json:"service_instance_guid"` + Credentials interface{} `json:"credentials"` + BindingOptions interface{} `json:"binding_options"` + GatewayData interface{} `json:"gateway_data"` + GatewayName string `json:"gateway_name"` + SyslogDrainUrl string `json:"syslog_drain_url"` + VolumeMounts interface{} `json:"volume_mounts"` + AppUrl string `json:"app_url"` + ServiceInstanceUrl string 
`json:"service_instance_url"` + c *Client +} + +func (c *Client) ListServiceBindingsByQuery(query url.Values) ([]ServiceBinding, error) { + var serviceBindings []ServiceBinding + var serviceBindingsResp ServiceBindingsResponse + pages := 0 + + requestUrl := "/v2/service_bindings?" + query.Encode() + for { + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service bindings") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service bindings request:") + } + + err = json.Unmarshal(resBody, &serviceBindingsResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service bindings") + } + for _, serviceBinding := range serviceBindingsResp.Resources { + serviceBinding.Entity.Guid = serviceBinding.Meta.Guid + serviceBinding.Entity.c = c + serviceBindings = append(serviceBindings, serviceBinding.Entity) + } + requestUrl = serviceBindingsResp.NextUrl + if requestUrl == "" { + break + } + pages += 1 + totalPages := serviceBindingsResp.Pages + if totalPages > 0 && pages >= totalPages { + break + } + } + return serviceBindings, nil +} + +func (c *Client) ListServiceBindings() ([]ServiceBinding, error) { + return c.ListServiceBindingsByQuery(nil) +} + +func (c *Client) GetServiceBindingByGuid(guid string) (ServiceBinding, error) { + var serviceBinding ServiceBindingResource + r := c.NewRequest("GET", "/v2/service_bindings/"+url.QueryEscape(guid)) + resp, err := c.DoRequest(r) + if err != nil { + return ServiceBinding{}, errors.Wrap(err, "Error requesting serving binding") + } + defer resp.Body.Close() + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return ServiceBinding{}, errors.Wrap(err, "Error reading service binding response body") + } + err = json.Unmarshal(resBody, &serviceBinding) + if err != nil { + return ServiceBinding{}, errors.Wrap(err, "Error unmarshalling service binding") + } + serviceBinding.Entity.Guid = serviceBinding.Meta.Guid + serviceBinding.Entity.c = c + return serviceBinding.Entity, nil +} + +func (c *Client) ServiceBindingByGuid(guid string) (ServiceBinding, error) { + return c.GetServiceBindingByGuid(guid) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_instances.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_instances.go new file mode 100644 index 00000000..91009dba --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_instances.go @@ -0,0 +1,112 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type ServiceInstancesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []ServiceInstanceResource `json:"resources"` +} + +type ServiceInstanceResource struct { + Meta Meta `json:"metadata"` + Entity ServiceInstance `json:"entity"` +} + +type ServiceInstance struct { + Name string `json:"name"` + Credentials map[string]interface{} `json:"credentials"` + ServicePlanGuid string `json:"service_plan_guid"` + SpaceGuid string `json:"space_guid"` + DashboardUrl string `json:"dashboard_url"` + Type string `json:"type"` + LastOperation LastOperation `json:"last_operation"` + Tags []string `json:"tags"` + ServiceGuid string `json:"service_guid"` + SpaceUrl string `json:"space_url"` + ServicePlanUrl 
string `json:"service_plan_url"` + ServiceBindingsUrl string `json:"service_bindings_url"` + ServiceKeysUrl string `json:"service_keys_url"` + RoutesUrl string `json:"routes_url"` + ServiceUrl string `json:"service_url"` + Guid string `json:"guid"` + c *Client +} + +type LastOperation struct { + Type string `json:"type"` + State string `json:"state"` + Description string `json:"description"` + UpdatedAt string `json:"updated_at"` + CreatedAt string `json:"created_at"` +} + +func (c *Client) ListServiceInstancesByQuery(query url.Values) ([]ServiceInstance, error) { + var instances []ServiceInstance + + requestUrl := "/v2/service_instances?" + query.Encode() + for { + var sir ServiceInstancesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service instances") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service instances request:") + } + + err = json.Unmarshal(resBody, &sir) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service instances") + } + for _, instance := range sir.Resources { + instance.Entity.Guid = instance.Meta.Guid + instance.Entity.c = c + instances = append(instances, instance.Entity) + } + + requestUrl = sir.NextUrl + if requestUrl == "" { + break + } + } + return instances, nil +} + +func (c *Client) ListServiceInstances() ([]ServiceInstance, error) { + return c.ListServiceInstancesByQuery(nil) +} + +func (c *Client) GetServiceInstanceByGuid(guid string) (ServiceInstance, error) { + var sir ServiceInstanceResource + req := c.NewRequest("GET", "/v2/service_instances/"+guid) + res, err := c.DoRequest(req) + if err != nil { + return ServiceInstance{}, errors.Wrap(err, "Error requesting service instance") + } + + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return ServiceInstance{}, errors.Wrap(err, "Error reading service instance response") + } + err = json.Unmarshal(data, &sir) + if err != nil { + return ServiceInstance{}, errors.Wrap(err, "Error JSON parsing service instance response") + } + sir.Entity.Guid = sir.Meta.Guid + sir.Entity.c = c + return sir.Entity, nil +} + +func (c *Client) ServiceInstanceByGuid(guid string) (ServiceInstance, error) { + return c.GetServiceInstanceByGuid(guid) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_keys.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_keys.go new file mode 100644 index 00000000..1b7a02c5 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_keys.go @@ -0,0 +1,87 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type ServiceKeysResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + Resources []ServiceKeyResource `json:"resources"` +} + +type ServiceKeyResource struct { + Meta Meta `json:"metadata"` + Entity ServiceKey `json:"entity"` +} + +type ServiceKey struct { + Name string `json:"name"` + Guid string `json:"guid"` + ServiceInstanceGuid string `json:"service_instance_guid"` + Credentials interface{} `json:"credentials"` + ServiceInstanceUrl string `json:"service_instance_url"` + c *Client +} + +func (c *Client) ListServiceKeysByQuery(query url.Values) ([]ServiceKey, error) { + var serviceKeys []ServiceKey + var serviceKeysResp ServiceKeysResponse + r 
:= c.NewRequest("GET", "/v2/service_keys?"+query.Encode()) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service keys") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service keys request:") + } + + err = json.Unmarshal(resBody, &serviceKeysResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service keys") + } + for _, serviceKey := range serviceKeysResp.Resources { + serviceKey.Entity.Guid = serviceKey.Meta.Guid + serviceKey.Entity.c = c + serviceKeys = append(serviceKeys, serviceKey.Entity) + } + return serviceKeys, nil +} + +func (c *Client) ListServiceKeys() ([]ServiceKey, error) { + return c.ListServiceKeysByQuery(nil) +} + +func (c *Client) GetServiceKeyByName(name string) (ServiceKey, error) { + var serviceKey ServiceKey + q := url.Values{} + q.Set("q", "name:"+name) + serviceKeys, err := c.ListServiceKeysByQuery(q) + if err != nil { + return serviceKey, err + } + if len(serviceKeys) == 0 { + return serviceKey, fmt.Errorf("Unable to find service key %s", name) + } + return serviceKeys[0], nil +} + +func (c *Client) GetServiceKeyByInstanceGuid(guid string) (ServiceKey, error) { + var serviceKey ServiceKey + q := url.Values{} + q.Set("q", "service_instance_guid:"+guid) + serviceKeys, err := c.ListServiceKeysByQuery(q) + if err != nil { + return serviceKey, err + } + if len(serviceKeys) == 0 { + return serviceKey, fmt.Errorf("Unable to find service key for guid %s", guid) + } + return serviceKeys[0], nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_plan_visibilities.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_plan_visibilities.go new file mode 100644 index 00000000..b7a228fa --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_plan_visibilities.go @@ -0,0 +1,100 @@ +package cfclient + +import ( + "encoding/json" + "io" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type ServicePlanVisibilitiesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []ServicePlanVisibilityResource `json:"resources"` +} + +type ServicePlanVisibilityResource struct { + Meta Meta `json:"metadata"` + Entity ServicePlanVisibility `json:"entity"` +} + +type ServicePlanVisibility struct { + Guid string `json:"guid"` + ServicePlanGuid string `json:"service_plan_guid"` + OrganizationGuid string `json:"organization_guid"` + ServicePlanUrl string `json:"service_plan_url"` + OrganizationUrl string `json:"organization_url"` + c *Client +} + +func (c *Client) ListServicePlanVisibilitiesByQuery(query url.Values) ([]ServicePlanVisibility, error) { + var servicePlanVisibilities []ServicePlanVisibility + requestUrl := "/v2/service_plan_visibilities?" 
+ query.Encode() + for { + var servicePlanVisibilitiesResp ServicePlanVisibilitiesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service plan visibilities") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service plan visibilities request:") + } + + err = json.Unmarshal(resBody, &servicePlanVisibilitiesResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service plan visibilities") + } + for _, servicePlanVisibility := range servicePlanVisibilitiesResp.Resources { + servicePlanVisibility.Entity.Guid = servicePlanVisibility.Meta.Guid + servicePlanVisibility.Entity.c = c + servicePlanVisibilities = append(servicePlanVisibilities, servicePlanVisibility.Entity) + } + requestUrl = servicePlanVisibilitiesResp.NextUrl + if requestUrl == "" { + break + } + } + return servicePlanVisibilities, nil +} + +func (c *Client) ListServicePlanVisibilities() ([]ServicePlanVisibility, error) { + return c.ListServicePlanVisibilitiesByQuery(nil) +} + +func (c *Client) CreateServicePlanVisibility(servicePlanGuid string, organizationGuid string) (ServicePlanVisibility, error) { + req := c.NewRequest("POST", "/v2/service_plan_visibilities") + req.obj = map[string]interface{}{ + "service_plan_guid": servicePlanGuid, + "organization_guid": organizationGuid, + } + resp, err := c.DoRequest(req) + if err != nil { + return ServicePlanVisibility{}, err + } + if resp.StatusCode != http.StatusCreated { + return ServicePlanVisibility{}, errors.Wrapf(err, "Error creating service plan visibility, response code: %d", resp.StatusCode) + } + return respBodyToServicePlanVisibility(resp.Body, c) +} + +func respBodyToServicePlanVisibility(body io.ReadCloser, c *Client) (ServicePlanVisibility, error) { + bodyRaw, err := ioutil.ReadAll(body) + if err != nil { + return ServicePlanVisibility{}, err + } + servicePlanVisibilityRes := ServicePlanVisibilityResource{} + err = json.Unmarshal([]byte(bodyRaw), &servicePlanVisibilityRes) + if err != nil { + return ServicePlanVisibility{}, err + } + servicePlanVisibility := servicePlanVisibilityRes.Entity + servicePlanVisibility.Guid = servicePlanVisibilityRes.Meta.Guid + servicePlanVisibility.c = c + return servicePlanVisibility, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_plans.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_plans.go new file mode 100644 index 00000000..777e1858 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/service_plans.go @@ -0,0 +1,65 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type ServicePlansResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + Resources []ServicePlanResource `json:"resources"` +} + +type ServicePlanResource struct { + Meta Meta `json:"metadata"` + Entity ServicePlan `json:"entity"` +} + +type ServicePlan struct { + Name string `json:"name"` + Guid string `json:"guid"` + Free bool `json:"free"` + Description string `json:"description"` + ServiceGuid string `json:"service_guid"` + Extra interface{} `json:"extra"` + UniqueId string `json:"unique_id"` + Public bool `json:"public"` + Active bool `json:"active"` + Bindable bool `json:"bindable"` + ServiceUrl string `json:"service_url"` + 
ServiceInstancesUrl string `json:"service_instances_url"` + c *Client +} + +func (c *Client) ListServicePlansByQuery(query url.Values) ([]ServicePlan, error) { + var servicePlans []ServicePlan + var servicePlansResp ServicePlansResponse + r := c.NewRequest("GET", "/v2/service_plans?"+query.Encode()) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting service plans") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading service plans request:") + } + + err = json.Unmarshal(resBody, &servicePlansResp) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling service plans") + } + for _, servicePlan := range servicePlansResp.Resources { + servicePlan.Entity.Guid = servicePlan.Meta.Guid + servicePlan.Entity.c = c + servicePlans = append(servicePlans, servicePlan.Entity) + } + return servicePlans, nil +} + +func (c *Client) ListServicePlans() ([]ServicePlan, error) { + return c.ListServicePlansByQuery(nil) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/services.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/services.go index 48a5c58b..f28c627d 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/services.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/services.go @@ -3,16 +3,18 @@ package cfclient import ( "encoding/json" "io/ioutil" - "log" + "net/url" + + "github.com/pkg/errors" ) -type ServiceResponse struct { - Count int `json:"total_results"` - Pages int `json:"total_pages"` - Resources []ServiceResource `json:"resources"` +type ServicesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + Resources []ServicesResource `json:"resources"` } -type ServiceResource struct { +type ServicesResource struct { Meta Meta `json:"metadata"` Entity Service `json:"entity"` } @@ -23,27 +25,37 @@ type Service struct { c *Client } -func (c *Client) ListServices() []Service { +type ServiceSummary struct { + Guid string `json:"guid"` + Name string `json:"name"` + BoundAppCount int `json:"bound_app_count"` +} + +func (c *Client) ListServicesByQuery(query url.Values) ([]Service, error) { var services []Service - var serviceResp ServiceResponse - r := c.newRequest("GET", "/v2/services") - resp, err := c.doRequest(r) + var serviceResp ServicesResponse + r := c.NewRequest("GET", "/v2/services?"+query.Encode()) + resp, err := c.DoRequest(r) if err != nil { - log.Printf("Error requesting services %v", err) + return nil, errors.Wrap(err, "Error requesting services") } resBody, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("Error reading services request %v", resBody) + return nil, errors.Wrap(err, "Error reading services request:") } err = json.Unmarshal(resBody, &serviceResp) if err != nil { - log.Printf("Error unmarshaling services %v", err) + return nil, errors.Wrap(err, "Error unmarshaling services") } for _, service := range serviceResp.Resources { service.Entity.Guid = service.Meta.Guid service.Entity.c = c services = append(services, service.Entity) } - return services + return services, nil +} + +func (c *Client) ListServices() ([]Service, error) { + return c.ListServicesByQuery(nil) } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/space_quotas.go 
b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/space_quotas.go new file mode 100644 index 00000000..14b8661f --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/space_quotas.go @@ -0,0 +1,95 @@ +package cfclient + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type SpaceQuotasResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SpaceQuotasResource `json:"resources"` +} + +type SpaceQuotasResource struct { + Meta Meta `json:"metadata"` + Entity SpaceQuota `json:"entity"` +} + +type SpaceQuota struct { + Guid string `json:"guid"` + Name string `json:"name"` + OrganizationGuid string `json:"organization_guid"` + NonBasicServicesAllowed bool `json:"non_basic_services_allowed"` + TotalServices int `json:"total_services"` + TotalRoutes int `json:"total_routes"` + MemoryLimit int `json:"memory_limit"` + InstanceMemoryLimit int `json:"instance_memory_limit"` + AppInstanceLimit int `json:"app_instance_limit"` + AppTaskLimit int `json:"app_task_limit"` + TotalServiceKeys int `json:"total_service_keys"` + TotalReservedRoutePorts int `json:"total_reserved_route_ports"` + c *Client +} + +func (c *Client) ListSpaceQuotasByQuery(query url.Values) ([]SpaceQuota, error) { + var spaceQuotas []SpaceQuota + requestUrl := "/v2/space_quota_definitions?" + query.Encode() + for { + spaceQuotasResp, err := c.getSpaceQuotasResponse(requestUrl) + if err != nil { + return []SpaceQuota{}, err + } + for _, space := range spaceQuotasResp.Resources { + space.Entity.Guid = space.Meta.Guid + space.Entity.c = c + spaceQuotas = append(spaceQuotas, space.Entity) + } + requestUrl = spaceQuotasResp.NextUrl + if requestUrl == "" { + break + } + } + return spaceQuotas, nil +} + +func (c *Client) ListSpaceQuotas() ([]SpaceQuota, error) { + return c.ListSpaceQuotasByQuery(nil) +} + +func (c *Client) GetSpaceQuotaByName(name string) (SpaceQuota, error) { + q := url.Values{} + q.Set("q", "name:"+name) + spaceQuotas, err := c.ListSpaceQuotasByQuery(q) + if err != nil { + return SpaceQuota{}, err + } + if len(spaceQuotas) != 1 { + return SpaceQuota{}, fmt.Errorf("Unable to find space quota " + name) + } + return spaceQuotas[0], nil +} + +func (c *Client) getSpaceQuotasResponse(requestUrl string) (SpaceQuotasResponse, error) { + var spaceQuotasResp SpaceQuotasResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return SpaceQuotasResponse{}, errors.Wrap(err, "Error requesting space quotas") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return SpaceQuotasResponse{}, errors.Wrap(err, "Error reading space quotas body") + } + err = json.Unmarshal(resBody, &spaceQuotasResp) + if err != nil { + return SpaceQuotasResponse{}, errors.Wrap(err, "Error unmarshalling space quotas") + } + return spaceQuotasResp, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/spaces.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/spaces.go index df4d3abe..f3941130 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/spaces.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/spaces.go @@ -1,14 +1,32 @@ package cfclient import ( + "bytes" "encoding/json" + "fmt" "io/ioutil" 
- "log" + "net/http" + "net/url" + + "github.com/pkg/errors" ) +type SpaceRequest struct { + Name string `json:"name"` + OrganizationGuid string `json:"organization_guid"` + DeveloperGuid []string `json:"developer_guids"` + ManagerGuid []string `json:"manager_guids"` + AuditorGuid []string `json:"auditor_guids"` + DomainGuid []string `json:"domain_guids"` + SecurityGroupGuids []string `json:"security_group_guids"` + SpaceQuotaDefGuid []string `json:"space_quota_definition_guid"` + AllowSSH []string `json:"allow_ssh"` +} + type SpaceResponse struct { Count int `json:"total_results"` Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` Resources []SpaceResource `json:"resources"` } @@ -18,55 +36,354 @@ type SpaceResource struct { } type Space struct { - Guid string `json:"guid"` - Name string `json:"name"` - OrgURL string `json:"organization_url"` - OrgData OrgResource `json:"organization"` - c *Client + Guid string `json:"guid"` + Name string `json:"name"` + OrganizationGuid string `json:"organization_guid"` + OrgURL string `json:"organization_url"` + OrgData OrgResource `json:"organization"` + QuotaDefinitionGuid string `json:"space_quota_definition_guid"` + c *Client +} + +type SpaceSummary struct { + Guid string `json:"guid"` + Name string `json:"name"` + Apps []AppSummary `json:"apps"` + Services []ServiceSummary `json:"services"` +} + +type SpaceRoleResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []SpaceRoleResource `json:"resources"` } -func (s *Space) Org() Org { +type SpaceRoleResource struct { + Meta Meta `json:"metadata"` + Entity SpaceRole `json:"entity"` +} + +type SpaceRole struct { + Guid string `json:"guid"` + Admin bool `json:"admin"` + Active bool `json:"active"` + DefaultSpaceGuid string `json:"default_space_guid"` + Username string `json:"username"` + SpaceRoles []string `json:"space_roles"` + SpacesUrl string `json:"spaces_url"` + OrganizationsUrl string `json:"organizations_url"` + ManagedOrganizationsUrl string `json:"managed_organizations_url"` + BillingManagedOrganizationsUrl string `json:"billing_managed_organizations_url"` + AuditedOrganizationsUrl string `json:"audited_organizations_url"` + ManagedSpacesUrl string `json:"managed_spaces_url"` + AuditedSpacesUrl string `json:"audited_spaces_url"` + c *Client +} + +func (s *Space) Org() (Org, error) { var orgResource OrgResource - r := s.c.newRequest("GET", s.OrgURL) - resp, err := s.c.doRequest(r) + r := s.c.NewRequest("GET", s.OrgURL) + resp, err := s.c.DoRequest(r) if err != nil { - log.Printf("Error requesting org %v", err) + return Org{}, errors.Wrap(err, "Error requesting org") } resBody, err := ioutil.ReadAll(resp.Body) if err != nil { - log.Printf("Error reading org request %v", resBody) + return Org{}, errors.Wrap(err, "Error reading org request") } err = json.Unmarshal(resBody, &orgResource) if err != nil { - log.Printf("Error unmarshaling org %v", err) + return Org{}, errors.Wrap(err, "Error unmarshaling org") } orgResource.Entity.Guid = orgResource.Meta.Guid orgResource.Entity.c = s.c - return orgResource.Entity + return orgResource.Entity, nil +} + +func (s *Space) Quota() (*SpaceQuota, error) { + var spaceQuota *SpaceQuota + var spaceQuotaResource SpaceQuotasResource + if s.QuotaDefinitionGuid == "" { + return nil, nil + } + requestUrl := fmt.Sprintf("/v2/space_quota_definitions/%s", s.QuotaDefinitionGuid) + r := s.c.NewRequest("GET", requestUrl) + resp, err := s.c.DoRequest(r) + if err != nil { + 
return &SpaceQuota{}, errors.Wrap(err, "Error requesting space quota") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return &SpaceQuota{}, errors.Wrap(err, "Error reading space quota body") + } + err = json.Unmarshal(resBody, &spaceQuotaResource) + if err != nil { + return &SpaceQuota{}, errors.Wrap(err, "Error unmarshalling space quota") + } + spaceQuota = &spaceQuotaResource.Entity + spaceQuota.Guid = spaceQuotaResource.Meta.Guid + spaceQuota.c = s.c + return spaceQuota, nil +} + +func (s *Space) Summary() (SpaceSummary, error) { + var spaceSummary SpaceSummary + requestUrl := fmt.Sprintf("/v2/spaces/%s/summary", s.Guid) + r := s.c.NewRequest("GET", requestUrl) + resp, err := s.c.DoRequest(r) + if err != nil { + return SpaceSummary{}, errors.Wrap(err, "Error requesting space summary") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return SpaceSummary{}, errors.Wrap(err, "Error reading space summary body") + } + err = json.Unmarshal(resBody, &spaceSummary) + if err != nil { + return SpaceSummary{}, errors.Wrap(err, "Error unmarshalling space summary") + } + return spaceSummary, nil +} + +func (s *Space) Roles() ([]SpaceRole, error) { + var roles []SpaceRole + requestUrl := fmt.Sprintf("/v2/spaces/%s/user_roles", s.Guid) + for { + rolesResp, err := s.c.getSpaceRolesResponse(requestUrl) + if err != nil { + return roles, err + } + for _, role := range rolesResp.Resources { + role.Entity.Guid = role.Meta.Guid + role.Entity.c = s.c + roles = append(roles, role.Entity) + } + requestUrl = rolesResp.NextUrl + if requestUrl == "" { + break + } + } + return roles, nil +} + +func (c *Client) CreateSpace(req SpaceRequest) (Space, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return Space{}, err + } + r := c.NewRequestWithBody("POST", "/v2/spaces", buf) + resp, err := c.DoRequest(r) + if err != nil { + return Space{}, err + } + if resp.StatusCode != http.StatusCreated { + return Space{}, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return c.handleSpaceResp(resp) +} + +func (c *Client) AssociateSpaceDeveloperByUsername(spaceGUID, name string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateDeveloperByUsername(name) +} + +func (c *Client) RemoveSpaceDeveloperByUsername(spaceGUID, name string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveDeveloperByUsername(name) +} + +func (c *Client) AssociateSpaceAuditorByUsername(spaceGUID, name string) (Space, error) { + space := Space{Guid: spaceGUID, c: c} + return space.AssociateAuditorByUsername(name) +} + +func (c *Client) RemoveSpaceAuditorByUsername(spaceGUID, name string) error { + space := Space{Guid: spaceGUID, c: c} + return space.RemoveAuditorByUsername(name) +} + +func (s *Space) AssociateDeveloperByUsername(name string) (Space, error) { + requestUrl := fmt.Sprintf("/v2/spaces/%s/developers", s.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return Space{}, err + } + r := s.c.NewRequestWithBody("PUT", requestUrl, buf) + resp, err := s.c.DoRequest(r) + if err != nil { + return Space{}, err + } + if resp.StatusCode != http.StatusCreated { + return Space{}, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return s.c.handleSpaceResp(resp) } -func (c *Client) ListSpaces() []Space { +func (s *Space) 
RemoveDeveloperByUsername(name string) error { + requestUrl := fmt.Sprintf("/v2/spaces/%s/developers", s.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return err + } + r := s.c.NewRequestWithBody("DELETE", requestUrl, buf) + resp, err := s.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} +func (s *Space) AssociateAuditorByUsername(name string) (Space, error) { + requestUrl := fmt.Sprintf("/v2/spaces/%s/auditors", s.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return Space{}, err + } + r := s.c.NewRequestWithBody("PUT", requestUrl, buf) + resp, err := s.c.DoRequest(r) + if err != nil { + return Space{}, err + } + if resp.StatusCode != http.StatusCreated { + return Space{}, fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return s.c.handleSpaceResp(resp) +} + +func (s *Space) RemoveAuditorByUsername(name string) error { + requestUrl := fmt.Sprintf("/v2/spaces/%s/auditors", s.Guid) + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(map[string]string{"username": name}) + if err != nil { + return err + } + r := s.c.NewRequestWithBody("DELETE", requestUrl, buf) + resp, err := s.c.DoRequest(r) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return fmt.Errorf("CF API returned with status code %d", resp.StatusCode) + } + return nil +} + +func (c *Client) ListSpacesByQuery(query url.Values) ([]Space, error) { + return c.fetchSpaces("/v2/spaces?" + query.Encode()) +} + +func (c *Client) ListSpaces() ([]Space, error) { + return c.ListSpacesByQuery(nil) +} + +func (c *Client) fetchSpaces(requestUrl string) ([]Space, error) { var spaces []Space + for { + spaceResp, err := c.getSpaceResponse(requestUrl) + if err != nil { + return []Space{}, err + } + for _, space := range spaceResp.Resources { + space.Entity.Guid = space.Meta.Guid + space.Entity.c = c + spaces = append(spaces, space.Entity) + } + requestUrl = spaceResp.NextUrl + if requestUrl == "" { + break + } + } + return spaces, nil +} + +func (c *Client) GetSpaceByName(spaceName string, orgGuid string) (space Space, err error) { + query := url.Values{} + query.Add("q", fmt.Sprintf("organization_guid:%s", orgGuid)) + query.Add("q", fmt.Sprintf("name:%s", spaceName)) + spaces, err := c.ListSpacesByQuery(query) + if err != nil { + return + } + + if len(spaces) == 0 { + return space, fmt.Errorf("No space found with name: `%s` in org with GUID: `%s`", spaceName, orgGuid) + } + + return spaces[0], nil + +} + +func (c *Client) GetSpaceByGuid(spaceGUID string) (Space, error) { + requestUrl := fmt.Sprintf("/v2/spaces/%s", spaceGUID) + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return Space{}, errors.Wrap(err, "Error requesting space info") + } + return c.handleSpaceResp(resp) +} + +func (c *Client) getSpaceResponse(requestUrl string) (SpaceResponse, error) { var spaceResp SpaceResponse - r := c.newRequest("GET", "/v2/spaces") - resp, err := c.doRequest(r) + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) if err != nil { - log.Printf("Error requesting spaces %v", err) + return SpaceResponse{}, errors.Wrap(err, "Error requesting spaces") } resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() if err != 
nil { - log.Printf("Error reading space request %v", resBody) + return SpaceResponse{}, errors.Wrap(err, "Error reading space request") } - err = json.Unmarshal(resBody, &spaceResp) if err != nil { - log.Printf("Error unmarshalling space %v", err) + return SpaceResponse{}, errors.Wrap(err, "Error unmarshalling space") } - for _, space := range spaceResp.Resources { - space.Entity.Guid = space.Meta.Guid - space.Entity.c = c - spaces = append(spaces, space.Entity) + return spaceResp, nil +} + +func (c *Client) getSpaceRolesResponse(requestUrl string) (SpaceRoleResponse, error) { + var roleResp SpaceRoleResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return roleResp, errors.Wrap(err, "Error requesting space roles") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return roleResp, errors.Wrap(err, "Error reading space roles request") + } + err = json.Unmarshal(resBody, &roleResp) + if err != nil { + return roleResp, errors.Wrap(err, "Error unmarshalling space roles") + } + return roleResp, nil +} + +func (c *Client) handleSpaceResp(resp *http.Response) (Space, error) { + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return Space{}, err + } + var spaceResource SpaceResource + err = json.Unmarshal(body, &spaceResource) + if err != nil { + return Space{}, err } - return spaces + space := spaceResource.Entity + space.Guid = spaceResource.Meta.Guid + space.c = c + return space, nil } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/stacks.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/stacks.go new file mode 100644 index 00000000..c501d783 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/stacks.go @@ -0,0 +1,72 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type StacksResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []StacksResource `json:"resources"` +} + +type StacksResource struct { + Meta Meta `json:"metadata"` + Entity Stack `json:"entity"` +} + +type Stack struct { + Guid string `json:"guid"` + Name string `json:"name"` + Description string `json:"description"` + c *Client +} + +func (c *Client) ListStacksByQuery(query url.Values) ([]Stack, error) { + var stacks []Stack + requestUrl := "/v2/stacks?" 
+ query.Encode() + for { + stacksResp, err := c.getStacksResponse(requestUrl) + if err != nil { + return []Stack{}, err + } + for _, stack := range stacksResp.Resources { + stack.Entity.Guid = stack.Meta.Guid + stack.Entity.c = c + stacks = append(stacks, stack.Entity) + } + requestUrl = stacksResp.NextUrl + if requestUrl == "" { + break + } + } + return stacks, nil +} + +func (c *Client) ListStacks() ([]Stack, error) { + return c.ListStacksByQuery(nil) +} + +func (c *Client) getStacksResponse(requestUrl string) (StacksResponse, error) { + var stacksResp StacksResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return StacksResponse{}, errors.Wrap(err, "Error requesting stacks") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return StacksResponse{}, errors.Wrap(err, "Error reading stacks body") + } + err = json.Unmarshal(resBody, &stacksResp) + if err != nil { + return StacksResponse{}, errors.Wrap(err, "Error unmarshalling stacks") + } + return stacksResp, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/tasks.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/tasks.go new file mode 100644 index 00000000..6c54204f --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/tasks.go @@ -0,0 +1,221 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "io/ioutil" + "net/url" + "time" + + "github.com/pkg/errors" +) + +// TaskListResponse is the JSON response from the API. +type TaskListResponse struct { + Pagination struct { + TotalResults int `json:"total_results"` + TotalPages int `json:"total_pages"` + First struct { + Href string `json:"href"` + } `json:"first"` + Last struct { + Href string `json:"href"` + } `json:"last"` + Next interface{} `json:"next"` + Previous interface{} `json:"previous"` + } `json:"pagination"` + Tasks []Task `json:"resources"` +} + +// Task is a description of a task element. +type Task struct { + GUID string `json:"guid"` + SequenceID int `json:"sequence_id"` + Name string `json:"name"` + Command string `json:"command"` + State string `json:"state"` + MemoryInMb int `json:"memory_in_mb"` + DiskInMb int `json:"disk_in_mb"` + Result struct { + FailureReason string `json:"failure_reason"` + } `json:"result"` + CreatedAt time.Time `json:"created_at"` + UpdatedAt time.Time `json:"updated_at"` + DropletGUID string `json:"droplet_guid"` + Links struct { + Self struct { + Href string `json:"href"` + } `json:"self"` + App struct { + Href string `json:"href"` + } `json:"app"` + Droplet struct { + Href string `json:"href"` + } `json:"droplet"` + } `json:"links"` +} + +// TaskRequest is a v3 JSON object as described in: +// http://v3-apidocs.cloudfoundry.org/version/3.0.0/index.html#create-a-task +type TaskRequest struct { + Command string `json:"command"` + Name string `json:"name"` + MemoryInMegabyte int `json:"memory_in_mb"` + DiskInMegabyte int `json:"disk_in_mb"` + DropletGUID string `json:"droplet_guid"` +} + +func (c *Client) makeTaskListRequestWithParams(baseUrl string, query url.Values) ([]byte, error) { + requestUrl := baseUrl + "?" 
+ query.Encode() + req := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(req) + if err != nil { + return nil, errors.Wrap(err, "Error requesting tasks") + } + defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, errors.Wrapf(err, "Error requesting tasks: status code not 200, it was %d", resp.StatusCode) + } + return ioutil.ReadAll(resp.Body) +} + +func parseTaskListRespones(answer []byte) (TaskListResponse, error) { + var response TaskListResponse + err := json.Unmarshal(answer, &response) + if err != nil { + return response, errors.Wrap(err, "Error unmarshaling response %v") + } + return response, nil +} + +func (c *Client) handleTasksApiCall(apiUrl string, query url.Values) ([]Task, error) { + body, err := c.makeTaskListRequestWithParams(apiUrl, query) + if err != nil { + return nil, errors.Wrap(err, "Error requesting tasks") + } + response, err := parseTaskListRespones(body) + if err != nil { + return nil, errors.Wrap(err, "Error reading tasks") + } + return response.Tasks, nil +} + +// ListTasks returns all tasks the user has access to. +// See http://v3-apidocs.cloudfoundry.org/version/3.12.0/index.html#list-tasks +func (c *Client) ListTasks() ([]Task, error) { + return c.handleTasksApiCall("/v3/tasks", url.Values{}) +} + +// ListTasksByQuery returns all tasks the user has access to, with query parameters. +// See http://v3-apidocs.cloudfoundry.org/version/3.12.0/index.html#list-tasks +func (c *Client) ListTasksByQuery(query url.Values) ([]Task, error) { + return c.handleTasksApiCall("/v3/tasks", query) +} + +// TasksByApp returns task structures which aligned to an app identified by the given guid. +// See: http://v3-apidocs.cloudfoundry.org/version/3.12.0/index.html#list-tasks-for-an-app +func (c *Client) TasksByApp(guid string) ([]Task, error) { + return c.TasksByAppByQuery(guid, url.Values{}) +} + +// TasksByAppByQuery returns task structures which aligned to an app identified by the given guid +// and filtered by the given query parameters. +// See: http://v3-apidocs.cloudfoundry.org/version/3.12.0/index.html#list-tasks-for-an-app +func (c *Client) TasksByAppByQuery(guid string, query url.Values) ([]Task, error) { + uri := fmt.Sprintf("/v3/apps/%s/tasks", guid) + return c.handleTasksApiCall(uri, query) +} + +func createReader(tr TaskRequest) (io.Reader, error) { + rmap := make(map[string]string) + rmap["command"] = tr.Command + if tr.Name != "" { + rmap["name"] = tr.Name + } + // setting droplet GUID causing issues + if tr.MemoryInMegabyte != 0 { + rmap["memory_in_mb"] = fmt.Sprintf("%d", tr.MemoryInMegabyte) + } + if tr.DiskInMegabyte != 0 { + rmap["disk_in_mb"] = fmt.Sprintf("%d", tr.DiskInMegabyte) + } + + bodyReader := bytes.NewBuffer(nil) + enc := json.NewEncoder(bodyReader) + if err := enc.Encode(rmap); err != nil { + return nil, errors.Wrap(err, "Error during encoding task request") + } + return bodyReader, nil +} + +// CreateTask creates a new task in CF system and returns its structure. 
+func (c *Client) CreateTask(tr TaskRequest) (task Task, err error) { + bodyReader, err := createReader(tr) + if err != nil { + return task, err + } + + request := fmt.Sprintf("/v3/apps/%s/tasks", tr.DropletGUID) + req := c.NewRequestWithBody("POST", request, bodyReader) + + resp, err := c.DoRequest(req) + if err != nil { + return task, errors.Wrap(err, "Error creating task") + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return task, errors.Wrap(err, "Error reading task after creation") + } + + err = json.Unmarshal(body, &task) + if err != nil { + return task, errors.Wrap(err, "Error unmarshaling task") + } + return task, err +} + +// TaskByGuid returns a task structure by requesting it with the tasks GUID. +func (c *Client) GetTaskByGuid(guid string) (task Task, err error) { + request := fmt.Sprintf("/v3/tasks/%s", guid) + req := c.NewRequest("GET", request) + + resp, err := c.DoRequest(req) + if err != nil { + return task, errors.Wrap(err, "Error requesting task") + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + return task, errors.Wrap(err, "Error reading task") + } + + err = json.Unmarshal(body, &task) + if err != nil { + return task, errors.Wrap(err, "Error unmarshaling task") + } + return task, err +} + +func (c *Client) TaskByGuid(guid string) (task Task, err error) { + return c.GetTaskByGuid(guid) +} + +// TerminateTask cancels a task identified by its GUID. +func (c *Client) TerminateTask(guid string) error { + req := c.NewRequest("PUT", fmt.Sprintf("/v3/tasks/%s/cancel", guid)) + resp, err := c.DoRequest(req) + if err != nil { + return errors.Wrap(err, "Error terminating task") + } + defer resp.Body.Close() + + if resp.StatusCode != 202 { + return errors.Wrapf(err, "Failed terminating task, response status code %d", resp.StatusCode) + } + return nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/types.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/types.go index 1afa5dfa..279106bf 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/types.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/types.go @@ -1,5 +1,8 @@ package cfclient type Meta struct { - Guid string `json:"guid"` + Guid string `json:"guid"` + Url string `json:"url"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/user_provided_service_instances.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/user_provided_service_instances.go new file mode 100644 index 00000000..0769dd92 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/user_provided_service_instances.go @@ -0,0 +1,99 @@ +package cfclient + +import ( + "encoding/json" + "io/ioutil" + "net/url" + + "github.com/pkg/errors" +) + +type UserProvidedServiceInstancesResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []UserProvidedServiceInstanceResource `json:"resources"` +} + +type UserProvidedServiceInstanceResource struct { + Meta Meta `json:"metadata"` + Entity UserProvidedServiceInstance `json:"entity"` +} + +type UserProvidedServiceInstance struct { + Name string `json:"name"` + Credentials 
map[string]interface{} `json:"credentials"` + SpaceGuid string `json:"space_guid"` + Type string `json:"type"` + Tags []string `json:"tags"` + SpaceUrl string `json:"space_url"` + ServiceBindingsUrl string `json:"service_bindings_url"` + RoutesUrl string `json:"routes_url"` + RouteServiceUrl string `json:"route_service_url"` + SyslogDrainUrl string `json:"syslog_drain_url"` + Guid string `json:"guid"` + c *Client +} + +func (c *Client) ListUserProvidedServiceInstancesByQuery(query url.Values) ([]UserProvidedServiceInstance, error) { + var instances []UserProvidedServiceInstance + + requestUrl := "/v2/user_provided_service_instances?" + query.Encode() + for { + var sir UserProvidedServiceInstancesResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return nil, errors.Wrap(err, "Error requesting user provided service instances") + } + resBody, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, errors.Wrap(err, "Error reading user provided service instances request:") + } + + err = json.Unmarshal(resBody, &sir) + if err != nil { + return nil, errors.Wrap(err, "Error unmarshaling user provided service instances") + } + for _, instance := range sir.Resources { + instance.Entity.Guid = instance.Meta.Guid + instance.Entity.c = c + instances = append(instances, instance.Entity) + } + + requestUrl = sir.NextUrl + if requestUrl == "" { + break + } + } + return instances, nil +} + +func (c *Client) ListUserProvidedServiceInstances() ([]UserProvidedServiceInstance, error) { + return c.ListUserProvidedServiceInstancesByQuery(nil) +} + +func (c *Client) GetUserProvidedServiceInstanceByGuid(guid string) (UserProvidedServiceInstance, error) { + var sir UserProvidedServiceInstanceResource + req := c.NewRequest("GET", "/v2/user_provided_service_instances/"+guid) + res, err := c.DoRequest(req) + if err != nil { + return UserProvidedServiceInstance{}, errors.Wrap(err, "Error requesting user provided service instance") + } + + data, err := ioutil.ReadAll(res.Body) + if err != nil { + return UserProvidedServiceInstance{}, errors.Wrap(err, "Error reading user provided service instance response") + } + err = json.Unmarshal(data, &sir) + if err != nil { + return UserProvidedServiceInstance{}, errors.Wrap(err, "Error JSON parsing user provided service instance response") + } + sir.Entity.Guid = sir.Meta.Guid + sir.Entity.c = c + return sir.Entity, nil +} + +func (c *Client) UserProvidedServiceInstanceByGuid(guid string) (UserProvidedServiceInstance, error) { + return c.GetUserProvidedServiceInstanceByGuid(guid) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/users.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/users.go new file mode 100644 index 00000000..1c0bf79d --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry-community/go-cfclient/users.go @@ -0,0 +1,169 @@ +package cfclient + +import ( + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "net/url" + + "github.com/pkg/errors" +) + +type UserRequest struct { + Guid string `json:"guid"` + DefaultSpaceGuid string `json:"default_space_guid,omitempty"` +} + +type Users []User + +type User struct { + Guid string `json:"guid"` + Admin bool `json:"admin"` + Active bool `json:"active"` + DefaultSpaceGUID string `json:"default_space_guid"` + Username string `json:"username"` + SpacesURL string `json:"spaces_url"` + OrgsURL string `json:"organizations_url"` + 
ManagedOrgsURL string `json:"managed_organizations_url"` + BillingManagedOrgsURL string `json:"billing_managed_organizations_url"` + AuditedOrgsURL string `json:"audited_organizations_url"` + ManagedSpacesURL string `json:"managed_spaces_url"` + AuditedSpacesURL string `json:"audited_spaces_url"` + c *Client +} + +type UserResource struct { + Meta Meta `json:"metadata"` + Entity User `json:"entity"` +} + +type UserResponse struct { + Count int `json:"total_results"` + Pages int `json:"total_pages"` + NextUrl string `json:"next_url"` + Resources []UserResource `json:"resources"` +} + +func (c *Client) ListUsersByQuery(query url.Values) (Users, error) { + var users []User + requestUrl := "/v2/users?" + query.Encode() + for { + userResp, err := c.getUserResponse(requestUrl) + if err != nil { + return []User{}, err + } + for _, user := range userResp.Resources { + user.Entity.Guid = user.Meta.Guid + user.Entity.c = c + users = append(users, user.Entity) + } + requestUrl = userResp.NextUrl + if requestUrl == "" { + break + } + } + return users, nil +} + +func (c *Client) ListUsers() (Users, error) { + return c.ListUsersByQuery(nil) +} + +func (c *Client) ListUserSpaces(userGuid string) ([]Space, error) { + return c.fetchSpaces(fmt.Sprintf("/v2/users/%s/spaces", userGuid)) +} + +func (c *Client) ListUserAuditedSpaces(userGuid string) ([]Space, error) { + return c.fetchSpaces(fmt.Sprintf("/v2/users/%s/audited_spaces", userGuid)) +} + +func (c *Client) ListUserManagedSpaces(userGuid string) ([]Space, error) { + return c.fetchSpaces(fmt.Sprintf("/v2/users/%s/managed_spaces", userGuid)) +} + +func (c *Client) ListUserOrgs(userGuid string) ([]Org, error) { + return c.fetchOrgs(fmt.Sprintf("/v2/users/%s/organizations", userGuid)) +} + +func (c *Client) ListUserManagedOrgs(userGuid string) ([]Org, error) { + return c.fetchOrgs(fmt.Sprintf("/v2/users/%s/managed_organizations", userGuid)) +} + +func (c *Client) ListUserAuditedOrgs(userGuid string) ([]Org, error) { + return c.fetchOrgs(fmt.Sprintf("/v2/users/%s/audited_organizations", userGuid)) +} + +func (c *Client) ListUserBillingManagedOrgs(userGuid string) ([]Org, error) { + return c.fetchOrgs(fmt.Sprintf("/v2/users/%s/billing_managed_organizations", userGuid)) +} + +func (c *Client) CreateUser(req UserRequest) (User, error) { + buf := bytes.NewBuffer(nil) + err := json.NewEncoder(buf).Encode(req) + if err != nil { + return User{}, err + } + r := c.NewRequestWithBody("POST", "/v2/users", buf) + resp, err := c.DoRequest(r) + if err != nil { + return User{}, err + } + if resp.StatusCode != http.StatusCreated { + return User{}, errors.Wrapf(err, "Error creating user, response code: %d", resp.StatusCode) + } + body, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return User{}, err + } + var userResource UserResource + err = json.Unmarshal(body, &userResource) + if err != nil { + return User{}, err + } + user := userResource.Entity + user.Guid = userResource.Meta.Guid + user.c = c + return user, nil +} + +func (c *Client) DeleteUser(userGuid string) error { + resp, err := c.DoRequest(c.NewRequest("DELETE", fmt.Sprintf("/v2/users/%s", userGuid))) + if err != nil { + return err + } + if resp.StatusCode != http.StatusNoContent { + return errors.Wrapf(err, "Error deleting user %s, response code: %d", userGuid, resp.StatusCode) + } + return nil +} + +func (u Users) GetUserByUsername(username string) User { + for _, user := range u { + if user.Username == username { + return user + } + } + return User{} +} + +func (c *Client) 
getUserResponse(requestUrl string) (UserResponse, error) { + var userResp UserResponse + r := c.NewRequest("GET", requestUrl) + resp, err := c.DoRequest(r) + if err != nil { + return UserResponse{}, errors.Wrap(err, "Error requesting users") + } + resBody, err := ioutil.ReadAll(resp.Body) + defer resp.Body.Close() + if err != nil { + return UserResponse{}, errors.Wrap(err, "Error reading user request") + } + err = json.Unmarshal(resBody, &userResp) + if err != nil { + return UserResponse{}, errors.Wrap(err, "Error unmarshalling user") + } + return userResp, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/LICENSE b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/LICENSE index 5c304d1a..f49a4e16 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/LICENSE +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/LICENSE @@ -1,4 +1,4 @@ -Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -178,7 +178,7 @@ Apache License APPENDIX: How to apply the Apache License to your work. To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" + boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a @@ -186,7 +186,7 @@ Apache License same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright {yyyy} {name of copyright owner} + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -198,4 +198,4 @@ Apache License distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and - limitations under the License. + limitations under the License. \ No newline at end of file diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/NOTICE b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/NOTICE index ff96b880..3c8dd5b6 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/NOTICE +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/NOTICE @@ -1,6 +1,6 @@ -lager +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. -Copyright (c) 2014-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. +This project contains software that is Copyright (c) 2014-2015 Pivotal Software, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -13,3 +13,8 @@ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. + +This project may include a number of subcomponents with separate +copyright notices and license terms. Your use of these subcomponents +is subject to the terms and conditions of each subcomponent's license, +as noted in the LICENSE file. 
diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/json_redacter.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/json_redacter.go new file mode 100644 index 00000000..a564ac22 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/json_redacter.go @@ -0,0 +1,112 @@ +package lager + +import ( + "encoding/json" + "regexp" +) + + +const awsAccessKeyIDPattern = `AKIA[A-Z0-9]{16}` +const awsSecretAccessKeyPattern = `KEY["']?\s*(?::|=>|=)\s*["']?[A-Z0-9/\+=]{40}["']?` +const cryptMD5Pattern = `\$1\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{22}` +const cryptSHA256Pattern = `\$5\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{43}` +const cryptSHA512Pattern = `\$6\$[A-Z0-9./]{1,16}\$[A-Z0-9./]{86}` +const privateKeyHeaderPattern = `-----BEGIN(.*)PRIVATE KEY-----` + +type JSONRedacter struct { + keyMatchers []*regexp.Regexp + valueMatchers []*regexp.Regexp +} + +func NewJSONRedacter(keyPatterns []string, valuePatterns []string) (*JSONRedacter, error) { + if keyPatterns == nil { + keyPatterns = []string{"[Pp]wd","[Pp]ass"} + } + if valuePatterns == nil { + valuePatterns = []string{awsAccessKeyIDPattern, awsSecretAccessKeyPattern, cryptMD5Pattern, cryptSHA256Pattern, cryptSHA512Pattern, privateKeyHeaderPattern} + } + ret := &JSONRedacter{} + for _ ,v := range keyPatterns { + r, err := regexp.Compile(v) + if err != nil { + return nil, err + } + ret.keyMatchers = append(ret.keyMatchers, r) + } + for _ ,v := range valuePatterns { + r, err := regexp.Compile(v) + if err != nil { + return nil, err + } + ret.valueMatchers = append(ret.valueMatchers, r) + } + return ret, nil +} + +func (r JSONRedacter) Redact(data []byte) []byte { + var jsonBlob interface{} + err := json.Unmarshal(data, &jsonBlob) + if err != nil { + return handleError(err) + } + r.redactValue(&jsonBlob) + + data, err = json.Marshal(jsonBlob) + if err != nil { + return handleError(err) + } + + return data +} + +func (r JSONRedacter) redactValue(data *interface{}) interface{} { + if data == nil { + return data + } + + if a, ok := (*data).([]interface{}); ok { + r.redactArray(&a) + } else if m, ok := (*data).(map[string]interface{}); ok { + r.redactObject(&m) + } else if s, ok := (*data).(string); ok { + for _, m := range r.valueMatchers { + if m.MatchString(s) { + (*data) = "*REDACTED*" + break + } + } + } + return (*data) +} + +func (r JSONRedacter) redactArray(data *[]interface{}) { + for i, _ := range *data { + r.redactValue(&((*data)[i])) + } +} + +func (r JSONRedacter) redactObject(data *map[string]interface{}) { + for k, v := range *data { + for _, m := range r.keyMatchers { + if m.MatchString(k) { + (*data)[k] = "*REDACTED*" + break + } + } + if (*data)[k] != "*REDACTED*" { + (*data)[k] = r.redactValue(&v) + } + } +} + +func handleError (err error) []byte { + var content []byte + if _, ok := err.(*json.UnsupportedTypeError); ok { + data := map[string]interface{}{"lager serialisation error": err.Error()} + content, err = json.Marshal(data) + } + if err != nil { + panic(err) + } + return content +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/logger.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/logger.go index 70727655..13467328 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/logger.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/logger.go @@ -117,6 +117,7 @@ func (l *logger) Error(action string, err error, data ...Data) { Message: fmt.Sprintf("%s.%s", l.task, 
action), LogLevel: ERROR, Data: logData, + Error: err, } for _, sink := range l.sinks { @@ -143,6 +144,7 @@ func (l *logger) Fatal(action string, err error, data ...Data) { Message: fmt.Sprintf("%s.%s", l.task, action), LogLevel: FATAL, Data: logData, + Error: err, } for _, sink := range l.sinks { diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/models.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/models.go index 94c0dac4..03a81040 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/models.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/models.go @@ -1,6 +1,9 @@ package lager -import "encoding/json" +import ( + "encoding/json" + "fmt" +) type LogLevel int @@ -19,12 +22,21 @@ type LogFormat struct { Message string `json:"message"` LogLevel LogLevel `json:"log_level"` Data Data `json:"data"` + Error error `json:"-"` } func (log LogFormat) ToJSON() []byte { content, err := json.Marshal(log) if err != nil { - panic(err) + _, ok1 := err.(*json.UnsupportedTypeError) + _, ok2 := err.(*json.MarshalerError) + if ok1 || ok2 { + log.Data = map[string]interface{}{"lager serialisation error": err.Error(), "data_dump": fmt.Sprintf("%#v", log.Data)} + content, err = json.Marshal(log) + } + if err != nil { + panic(err) + } } return content } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/package.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/package.go new file mode 100644 index 00000000..7e8b063d --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/package.go @@ -0,0 +1 @@ +package lager // import "code.cloudfoundry.org/lager" diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/redacting_writer_sink.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/redacting_writer_sink.go new file mode 100644 index 00000000..2c4e049e --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/lager/redacting_writer_sink.go @@ -0,0 +1,43 @@ +package lager + +import ( + "io" + "sync" +) + +type redactingWriterSink struct { + writer io.Writer + minLogLevel LogLevel + writeL *sync.Mutex + jsonRedacter *JSONRedacter +} + +func NewRedactingWriterSink(writer io.Writer, minLogLevel LogLevel, keyPatterns []string, valuePatterns []string) (Sink, error) { + jsonRedacter, err := NewJSONRedacter(keyPatterns, valuePatterns) + if err != nil { + return nil, err + } + return &redactingWriterSink{ + writer: writer, + minLogLevel: minLogLevel, + writeL: new(sync.Mutex), + jsonRedacter: jsonRedacter, + }, nil +} + +func (sink *redactingWriterSink) Log(log LogFormat) { + if log.LogLevel < sink.minLogLevel { + return + } + + sink.writeL.Lock() + v := log.ToJSON() + rv := sink.jsonRedacter.Redact(v) + + sink.writer.Write(rv) + sink.writer.Write([]byte("\n")) + sink.writeL.Unlock() +} + + + diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/NOTICE b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/NOTICE new file mode 100644 index 00000000..53aae89d --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/NOTICE @@ -0,0 +1,15 @@ +noaa + +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/README.md b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/README.md index c07e13ba..2e9d8793 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/README.md +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/README.md @@ -1,23 +1,12 @@ -#NOAA +# NOAA [![slack.cloudfoundry.org][slack-badge]][loggregator-slack] -[![Build Status](https://travis-ci.org/cloudfoundry/noaa.svg?branch=master)](https://travis-ci.org/cloudfoundry/noaa) +[![Concourse Status](https://loggregator.ci.cf-app.com/api/v1/pipelines/submodules/jobs/noaa-unit-tests/badge)](https://loggregator.ci.cf-app.com/teams/main/pipelines/submodules/jobs/noaa-unit-tests) [![Coverage Status](https://coveralls.io/repos/cloudfoundry/noaa/badge.png)](https://coveralls.io/r/cloudfoundry/noaa) [![GoDoc](https://godoc.org/github.com/cloudfoundry/noaa?status.png)](https://godoc.org/github.com/cloudfoundry/noaa) -NOAA is a client library to consume metric and log messages from Doppler. +noaa is a client library to consume metric and log messages from Doppler. -##WARNING - -This library does not work with Go 1.3 through 1.3.3, due to a bug in the standard libraries. - -We support the two most recent stable versions of Golang. - -###Deprecation -The consumer of the `noaa` package has been deprecated in favor of the consumer in the `consumer` package. -When creating a new NOAA consumer, use `consumer.New()` instead of `noaa.NewConsumer()`. -See the samples for more details. - -##Get the Code +## Get the Code This Go project is designed to be imported into `$GOPATH`, rather than being cloned into any working directory. There are two ways to do this. @@ -25,9 +14,9 @@ This Go project is designed to be imported into `$GOPATH`, rather than being clo ``` $ echo $GOPATH /Users/myuser/go - + $ go get github.com/cloudfoundry/noaa - + $ ls ~/go/src/github.com/cloudfoundry/ noaa/ sonde-go/ ``` @@ -36,13 +25,29 @@ This Go project is designed to be imported into `$GOPATH`, rather than being clo ``` $ echo $GOPATH /Users/myuser/go - + $ cd /Users/myuser/go/src/github.com/cloudfoundry $ git clone git@github.com:cloudfoundry/noaa.git $ cd noaa $ go get ./... ``` +## Updates + +### Reconnecting to Traffic Controller + +noaa has recently updated its reconnect strategy from trying to reconnect five +times in quick succession to a back-off strategy. The back-off strategy can be +configured by setting the [SetMinRetryDelay()](https://godoc.org/github.com/cloudfoundry/noaa/consumer#Consumer.SetMinRetryDelay) +and the [SetMaxRetryDelay()](https://godoc.org/github.com/cloudfoundry/noaa/consumer#Consumer.SetMaxRetryDelay). + +During reconnection, noaa will wait initially at the `MinRetryDelay` interval +and double until it reaches `MaxRetryDelay` where it will try reconnecting +indefinitely at the `MaxRetryDelay` interval. + +This behavior will affect functions like `consumer.Firehose()`, `consumer.Stream()` +and `consumer.TailingLogs()`. 
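To make the back-off configuration described above concrete, here is a minimal sketch of a firehose consumer that uses the retry setters introduced in this patch (`SetMinRetryDelay`, `SetMaxRetryDelay`, `SetMaxRetryCount`) together with `consumer.New` and `Firehose`. This is an illustration, not part of the patch: the endpoint, subscription ID, and token are placeholders, and the import path assumes the upstream `github.com/cloudfoundry/noaa/consumer` package rather than the vendored copy.

```go
package main

import (
	"crypto/tls"
	"log"
	"time"

	"github.com/cloudfoundry/noaa/consumer"
)

func main() {
	// Endpoint, subscription ID, and token are placeholders for illustration.
	c := consumer.New("wss://doppler.example.com:443", &tls.Config{}, nil)

	// Back-off starts at 200ms, doubles after each failed reconnect,
	// and is capped at 30s; give up entirely after 100 attempts.
	c.SetMinRetryDelay(200 * time.Millisecond)
	c.SetMaxRetryDelay(30 * time.Second)
	c.SetMaxRetryCount(100)

	msgs, errs := c.Firehose("example-subscription", "bearer-token")
	for {
		select {
		case env, ok := <-msgs:
			if !ok {
				return
			}
			log.Printf("envelope: %v", env)
		case err, ok := <-errs:
			if !ok {
				return
			}
			log.Printf("firehose error: %v", err)
		}
	}
}
```

With these settings, a dropped connection is retried after 200ms, then 400ms, 800ms, and so on up to the 30s ceiling; a successful reconnect resets the delay, and once the retry count is exhausted the consumer sends `ErrMaxRetriesReached` on the error channel and closes both channels.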
+ ## Sample Applications ### Prerequisites @@ -116,7 +121,11 @@ bin/container_metrics For more information to setup a test environment in order to pull container metrics look at the README.md in the container_metrics sample. -##Development +## Development Use `go get -d -v -t ./... && ginkgo --race --randomizeAllSpecs --failOnPending --skipMeasurements --cover` to run the tests. + + +[slack-badge]: https://slack.cloudfoundry.org/badge.svg +[loggregator-slack]: https://cloudfoundry.slack.com/archives/loggregator diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer.go deleted file mode 100644 index cdc35e0c..00000000 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer.go +++ /dev/null @@ -1,510 +0,0 @@ -package noaa - -import ( - "bufio" - "bytes" - "crypto/tls" - "errors" - "fmt" - "io/ioutil" - "log" - "mime/multipart" - "net" - "net/http" - "net/url" - "regexp" - "strings" - "sync" - "time" - - noaa_errors "github.com/cloudfoundry/noaa/errors" - "github.com/cloudfoundry/sonde-go/events" - "github.com/gogo/protobuf/proto" - "github.com/gorilla/websocket" -) - -var ( - // KeepAlive sets the interval between keep-alive messages sent by the client to loggregator. - KeepAlive = 25 * time.Second - reconnectTimeout = 500 * time.Millisecond - boundaryRegexp = regexp.MustCompile("boundary=(.*)") - ErrNotOK = errors.New("unknown issue when making HTTP request to Loggregator") - ErrNotFound = ErrNotOK // NotFound isn't an accurate description of how this is used; please use ErrNotOK instead - ErrBadResponse = errors.New("bad server response") - ErrBadRequest = errors.New("bad client request") - ErrLostConnection = errors.New("remote server terminated connection unexpectedly") -) - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// Consumer represents the actions that can be performed against trafficcontroller. -type Consumer struct { - trafficControllerUrl string - tlsConfig *tls.Config - ws *websocket.Conn - callback func() - proxy func(*http.Request) (*url.URL, error) - debugPrinter DebugPrinter - idleTimeout time.Duration - sync.RWMutex - stopChan chan struct{} - client *http.Client - dialer websocket.Dialer -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// NewConsumer creates a new consumer to a trafficcontroller. -func NewConsumer(trafficControllerUrl string, tlsConfig *tls.Config, proxy func(*http.Request) (*url.URL, error)) *Consumer { - log.Printf("You are using a deprecated noaa consumer (noaa.Consumer). Please switch to 'github.com/cloudfoundry/noaa/consumer'.Consumer at your earliest convenience.") - transport := &http.Transport{Proxy: proxy, TLSClientConfig: tlsConfig} - consumer := &Consumer{ - trafficControllerUrl: trafficControllerUrl, - tlsConfig: tlsConfig, - proxy: proxy, - debugPrinter: NullDebugPrinter{}, - stopChan: make(chan struct{}), - client: &http.Client{Transport: transport}, - } - consumer.dialer = websocket.Dialer{ - NetDial: consumer.proxyDial, - TLSClientConfig: tlsConfig, - } - return consumer -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// TailingLogs behaves exactly as TailingLogsWithoutReconnect, except that it retries 5 times if the connection -// to the remote server is lost and returns all errors from each attempt on errorChan. 
-func (cnsmr *Consumer) TailingLogs(appGuid string, authToken string, outputChan chan<- *events.LogMessage, errorChan chan<- error) { - action := func() error { - return cnsmr.TailingLogsWithoutReconnect(appGuid, authToken, outputChan) - } - - cnsmr.retryAction(action, errorChan) -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// TailingLogsWithoutReconnect listens indefinitely for log messages only; other event types are dropped. -// -// If you wish to be able to terminate the listen early, run TailingLogsWithoutReconnect in a Goroutine and -// call Close() when you are finished listening. -// -// Messages are presented in the order received from the loggregator server. Chronological or -// other ordering is not guaranteed. It is the responsibility of the consumer of these channels -// to provide any desired sorting mechanism. -func (cnsmr *Consumer) TailingLogsWithoutReconnect(appGuid string, authToken string, outputChan chan<- *events.LogMessage) error { - allEvents := make(chan *events.Envelope) - - streamPath := fmt.Sprintf("/apps/%s/stream", appGuid) - errChan := make(chan error) - go func() { - err := cnsmr.stream(streamPath, authToken, allEvents) - errChan <- err - close(errChan) - }() - - go func() { - for event := range allEvents { - if *event.EventType == events.Envelope_LogMessage { - outputChan <- event.GetLogMessage() - } - } - }() - - go func() { - <-cnsmr.stopChan - close(allEvents) - }() - - return <-errChan -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// Stream behaves exactly as StreamWithoutReconnect, except that it retries 5 times if the connection -// to the remote server is lost. -func (cnsmr *Consumer) Stream(appGuid string, authToken string, outputChan chan<- *events.Envelope, errorChan chan<- error) { - action := func() error { - return cnsmr.StreamWithoutReconnect(appGuid, authToken, outputChan) - } - - cnsmr.retryAction(action, errorChan) -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// StreamWithoutReconnect listens indefinitely for all log and event messages. -// -// If you wish to be able to terminate the listen early, run StreamWithoutReconnect in a Goroutine and -// call Close() when you are finished listening. -// -// Messages are presented in the order received from the loggregator server. Chronological or other ordering -// is not guaranteed. It is the responsibility of the consumer of these channels to provide any desired sorting -// mechanism. -func (cnsmr *Consumer) StreamWithoutReconnect(appGuid string, authToken string, outputChan chan<- *events.Envelope) error { - streamPath := fmt.Sprintf("/apps/%s/stream", appGuid) - return cnsmr.stream(streamPath, authToken, outputChan) -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// Firehose behaves exactly as FirehoseWithoutReconnect, except that it retries 5 times if the connection -// to the remote server is lost. -func (cnsmr *Consumer) Firehose(subscriptionId string, authToken string, outputChan chan<- *events.Envelope, errorChan chan<- error) { - action := func() error { - return cnsmr.FirehoseWithoutReconnect(subscriptionId, authToken, outputChan) - } - - cnsmr.retryAction(action, errorChan) -} - -func (cnsmr *Consumer) SetIdleTimeout(idleTimeout time.Duration) { - cnsmr.idleTimeout = idleTimeout -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// FirehoseWithoutReconnect streams all data. 
All clients with the same subscriptionId will receive a proportionate share of the -// message stream. Each pool of clients will receive the entire stream. -// -// If you wish to be able to terminate the listen early, run FirehoseWithoutReconnect in a Goroutine and -// call Close() when you are finished listening. -// -// Messages are presented in the order received from the loggregator server. Chronological or other ordering -// is not guaranteed. It is the responsibility of the consumer of these channels to provide any desired sorting -// mechanism. -func (cnsmr *Consumer) FirehoseWithoutReconnect(subscriptionId string, authToken string, outputChan chan<- *events.Envelope) error { - streamPath := "/firehose/" + subscriptionId - return cnsmr.stream(streamPath, authToken, outputChan) -} - -func (cnsmr *Consumer) stream(streamPath string, authToken string, outputChan chan<- *events.Envelope) error { - var err error - - cnsmr.Lock() - cnsmr.ws, err = cnsmr.establishWebsocketConnection(streamPath, authToken) - cnsmr.Unlock() - - if err != nil { - return err - } - - return cnsmr.listenForMessages(outputChan) -} - -func makeError(err error, code int32) *events.Envelope { - return &events.Envelope{ - EventType: events.Envelope_Error.Enum(), - Error: &events.Error{ - Source: proto.String("NOAA"), - Code: &code, - Message: proto.String(err.Error()), - }, - } -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// RecentLogs connects to trafficcontroller via its 'recentlogs' http(s) endpoint and returns a slice of recent messages. -// It does not guarantee any order of the messages; they are in the order returned by trafficcontroller. -// -// The SortRecent method is provided to sort the data returned by this method. -func (cnsmr *Consumer) RecentLogs(appGuid string, authToken string) ([]*events.LogMessage, error) { - envelopes, err := cnsmr.readEnvelopesFromTrafficController(appGuid, authToken, "recentlogs") - - if err != nil { - return nil, err - } - - messages := make([]*events.LogMessage, 0, 200) - for _, envelope := range envelopes { - messages = append(messages, envelope.GetLogMessage()) - } - - return messages, err -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// ContainerMetrics connects to trafficcontroller via its 'containermetrics' http(s) endpoint and returns the most recent messages for an app. -// The returned metrics will be sorted by InstanceIndex. 
-func (cnsmr *Consumer) ContainerMetrics(appGuid string, authToken string) ([]*events.ContainerMetric, error) { - envelopes, err := cnsmr.readEnvelopesFromTrafficController(appGuid, authToken, "containermetrics") - - if err != nil { - return nil, err - } - - messages := make([]*events.ContainerMetric, 0, 200) - - for _, envelope := range envelopes { - if envelope.GetEventType() == events.Envelope_LogMessage { - return []*events.ContainerMetric{}, errors.New(fmt.Sprintf("Upstream error: %s", envelope.GetLogMessage().GetMessage())) - } - - messages = append(messages, envelope.GetContainerMetric()) - } - - SortContainerMetrics(messages) - - return messages, err -} - -func (cnsmr *Consumer) readEnvelopesFromTrafficController(appGuid string, authToken string, endpoint string) ([]*events.Envelope, error) { - trafficControllerUrl, err := url.ParseRequestURI(cnsmr.trafficControllerUrl) - if err != nil { - return nil, err - } - - scheme := "https" - - if trafficControllerUrl.Scheme == "ws" { - scheme = "http" - } - - recentPath := fmt.Sprintf("%s://%s/apps/%s/%s", scheme, trafficControllerUrl.Host, appGuid, endpoint) - - req, _ := http.NewRequest("GET", recentPath, nil) - req.Header.Set("Authorization", authToken) - - resp, err := cnsmr.client.Do(req) - - if err != nil { - return nil, errors.New(fmt.Sprintf("Error dialing trafficcontroller server: %s.\nPlease ask your Cloud Foundry Operator to check the platform configuration (trafficcontroller endpoint is %s).", err.Error(), cnsmr.trafficControllerUrl)) - } - - defer resp.Body.Close() - - err = checkForErrors(resp) - if err != nil { - return nil, err - } - - reader, err := getMultipartReader(resp) - if err != nil { - return nil, err - } - - var envelopes []*events.Envelope - var buffer bytes.Buffer - - for part, loopErr := reader.NextPart(); loopErr == nil; part, loopErr = reader.NextPart() { - buffer.Reset() - - _, err := buffer.ReadFrom(part) - if err != nil { - break - } - - envelope := new(events.Envelope) - proto.Unmarshal(buffer.Bytes(), envelope) - - envelopes = append(envelopes, envelope) - } - - return envelopes, nil -} - -func checkForErrors(resp *http.Response) error { - if resp.StatusCode == http.StatusUnauthorized { - data, _ := ioutil.ReadAll(resp.Body) - return noaa_errors.NewUnauthorizedError(string(data)) - } - - if resp.StatusCode == http.StatusBadRequest { - return ErrBadRequest - } - - if resp.StatusCode != http.StatusOK { - return ErrNotOK - } - return nil -} - -func getMultipartReader(resp *http.Response) (*multipart.Reader, error) { - contentType := resp.Header.Get("Content-Type") - - if len(strings.TrimSpace(contentType)) == 0 { - return nil, ErrBadResponse - } - - matches := boundaryRegexp.FindStringSubmatch(contentType) - - if len(matches) != 2 || len(strings.TrimSpace(matches[1])) == 0 { - return nil, ErrBadResponse - } - reader := multipart.NewReader(resp.Body, matches[1]) - return reader, nil -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// Close terminates the websocket connection to trafficcontroller. -func (cnsmr *Consumer) Close() error { - cnsmr.Lock() - defer cnsmr.Unlock() - defer close(cnsmr.stopChan) - if cnsmr.ws == nil { - return errors.New("connection does not exist") - } - - cnsmr.ws.WriteControl(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""), time.Time{}) - return cnsmr.ws.Close() -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. 
-// -// SetOnConnectCallback sets a callback function to be called with the websocket connection is established. -func (cnsmr *Consumer) SetOnConnectCallback(cb func()) { - cnsmr.callback = cb -} - -// noaa.Consumer is deprecated. Use the one in the consumer package. -// -// SetDebugPrinter enables logging of the websocket handshake. -func (cnsmr *Consumer) SetDebugPrinter(debugPrinter DebugPrinter) { - cnsmr.debugPrinter = debugPrinter -} - -func (cnsmr *Consumer) listenForMessages(msgChan chan<- *events.Envelope) error { - defer cnsmr.ws.Close() - - for { - if cnsmr.idleTimeout != 0 { - cnsmr.ws.SetReadDeadline(time.Now().Add(cnsmr.idleTimeout)) - } - _, data, err := cnsmr.ws.ReadMessage() - if err != nil { - return err - } - - envelope := &events.Envelope{} - err = proto.Unmarshal(data, envelope) - if err != nil { - continue - } - - msgChan <- envelope - } -} - -func headersString(header http.Header) string { - var result string - for name, values := range header { - result += name + ": " + strings.Join(values, ", ") + "\n" - } - return result -} - -func (cnsmr *Consumer) establishWebsocketConnection(path string, authToken string) (*websocket.Conn, error) { - header := http.Header{"Origin": []string{"http://localhost"}, "Authorization": []string{authToken}} - - url := cnsmr.trafficControllerUrl + path - - cnsmr.debugPrinter.Print("WEBSOCKET REQUEST:", - "GET "+path+" HTTP/1.1\n"+ - "Host: "+cnsmr.trafficControllerUrl+"\n"+ - "Upgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Version: 13\nSec-WebSocket-Key: [HIDDEN]\n"+ - headersString(header)) - - ws, resp, err := cnsmr.dialer.Dial(url, header) - - if resp != nil { - cnsmr.debugPrinter.Print("WEBSOCKET RESPONSE:", - resp.Proto+" "+resp.Status+"\n"+ - headersString(resp.Header)) - } - - if resp != nil && resp.StatusCode == http.StatusUnauthorized { - bodyData, _ := ioutil.ReadAll(resp.Body) - err = noaa_errors.NewUnauthorizedError(string(bodyData)) - return ws, err - } - - if err == nil && cnsmr.callback != nil { - cnsmr.callback() - } - - if err != nil { - - return nil, errors.New(fmt.Sprintf("Error dialing trafficcontroller server: %s.\nPlease ask your Cloud Foundry Operator to check the platform configuration (trafficcontroller is %s).", err.Error(), cnsmr.trafficControllerUrl)) - } - - return ws, err -} - -func (cnsmr *Consumer) proxyDial(network, addr string) (net.Conn, error) { - targetUrl, err := url.Parse("http://" + addr) - if err != nil { - return nil, err - } - - proxy := cnsmr.proxy - if proxy == nil { - proxy = http.ProxyFromEnvironment - } - - proxyUrl, err := proxy(&http.Request{URL: targetUrl}) - if err != nil { - return nil, err - } - if proxyUrl == nil { - return net.Dial(network, addr) - } - - proxyConn, err := net.Dial(network, proxyUrl.Host) - if err != nil { - return nil, err - } - - connectReq := &http.Request{ - Method: "CONNECT", - URL: targetUrl, - Host: targetUrl.Host, - Header: make(http.Header), - } - connectReq.Write(proxyConn) - - connectResp, err := http.ReadResponse(bufio.NewReader(proxyConn), connectReq) - if err != nil { - proxyConn.Close() - return nil, err - } - if connectResp.StatusCode != http.StatusOK { - f := strings.SplitN(connectResp.Status, " ", 2) - proxyConn.Close() - return nil, errors.New(f[1]) - } - - return proxyConn, nil -} - -func (cnsmr *Consumer) retryAction(action func() error, errorChan chan<- error) { - reconnectAttempts := 0 - - oldConnectCallback := cnsmr.callback - defer func() { cnsmr.callback = oldConnectCallback }() - - defer close(errorChan) - - cnsmr.callback = func() 
{ - reconnectAttempts = 0 - if oldConnectCallback != nil { - oldConnectCallback() - } - } - - for ; reconnectAttempts < 5; reconnectAttempts++ { - select { - case <-cnsmr.stopChan: - return - default: - } - - errorChan <- action() - time.Sleep(reconnectTimeout) - } -} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/async.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/async.go index 943329a4..5fb20bf8 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/async.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/async.go @@ -1,24 +1,57 @@ package consumer import ( - "bufio" "errors" "fmt" "io/ioutil" - "net" "net/http" "net/url" "strings" "sync" + "sync/atomic" "time" - "github.com/cloudfoundry/noaa" noaa_errors "github.com/cloudfoundry/noaa/errors" "github.com/cloudfoundry/sonde-go/events" "github.com/gogo/protobuf/proto" "github.com/gorilla/websocket" ) +const ( + DefaultMinRetryDelay = 500 * time.Millisecond + DefaultMaxRetryDelay = time.Minute + DefaultMaxRetryCount = 1000 +) + +// SetMinRetryDelay sets the duration that automatically reconnecting methods +// on c (e.g. Firehose, Stream, TailingLogs) will sleep for after receiving +// an error from the traffic controller. +// +// Successive errors will double the sleep time, up to c's max retry delay, +// set by c.SetMaxRetryDelay. +// +// Defaults to DefaultMinRetryDelay. +func (c *Consumer) SetMinRetryDelay(d time.Duration) { + atomic.StoreInt64(&c.minRetryDelay, int64(d)) +} + +// SetMaxRetryDelay sets the maximum duration that automatically reconnecting +// methods on c (e.g. Firehose, Stream, TailingLogs) will sleep for after +// receiving many successive errors from the traffic controller. +// +// Defaults to DefaultMaxRetryDelay. +func (c *Consumer) SetMaxRetryDelay(d time.Duration) { + atomic.StoreInt64(&c.maxRetryDelay, int64(d)) +} + +// SetMaxRetryCount sets the maximum number of reconnnection attemps that +// methods on c (e.g. Firehose, Stream, TailingLogs) will make before failing. +// +// Defaults to DefaultMaxRetryCount. +func (c *Consumer) SetMaxRetryCount(count int) { + atomic.StoreInt64(&c.maxRetryCount, int64(count)) +} + // TailingLogs listens indefinitely for log messages only; other event types // are dropped. // Whenever an error is encountered, the error will be sent down the error @@ -31,13 +64,13 @@ import ( // Errors must be drained from the returned error channel for it to continue // retrying; if they are not drained, the connection attempts will hang. func (c *Consumer) TailingLogs(appGuid, authToken string) (<-chan *events.LogMessage, <-chan error) { - return c.tailingLogs(appGuid, authToken, maxRetries) + return c.tailingLogs(appGuid, authToken, true) } // TailingLogsWithoutReconnect functions identically to TailingLogs but without // any reconnect attempts when errors occur. func (c *Consumer) TailingLogsWithoutReconnect(appGuid string, authToken string) (<-chan *events.LogMessage, <-chan error) { - return c.tailingLogs(appGuid, authToken, 0) + return c.tailingLogs(appGuid, authToken, false) } // Stream listens indefinitely for all log and event messages. @@ -47,17 +80,15 @@ func (c *Consumer) TailingLogsWithoutReconnect(appGuid string, authToken string) // of the consumer of these channels to provide any desired sorting mechanism. 
// // Whenever an error is encountered, the error will be sent down the error -// channel and Stream will attempt to reconnect up to 5 times. After five -// failed reconnection attempts, Stream will give up and close the error and -// Envelope channels. +// channel and Stream will attempt to reconnect indefinitely. func (c *Consumer) Stream(appGuid string, authToken string) (outputChan <-chan *events.Envelope, errorChan <-chan error) { - return c.runStream(appGuid, authToken, maxRetries) + return c.runStream(appGuid, authToken, true) } // StreamWithoutReconnect functions identically to Stream but without any // reconnect attempts when errors occur. func (c *Consumer) StreamWithoutReconnect(appGuid string, authToken string) (<-chan *events.Envelope, <-chan error) { - return c.runStream(appGuid, authToken, 0) + return c.runStream(appGuid, authToken, false) } // Firehose streams all data. All clients with the same subscriptionId will @@ -69,22 +100,47 @@ func (c *Consumer) StreamWithoutReconnect(appGuid string, authToken string) (<-c // of the consumer of these channels to provide any desired sorting mechanism. // // Whenever an error is encountered, the error will be sent down the error -// channel and Firehose will attempt to reconnect up to 5 times. After five -// failed reconnection attempts, Firehose will give up and close the error and -// Envelope channels. -func (c *Consumer) Firehose(subscriptionId string, authToken string) (<-chan *events.Envelope, <-chan error) { - return c.firehose(subscriptionId, authToken, 5) +// channel and Firehose will attempt to reconnect indefinitely. +func (c *Consumer) Firehose( + subscriptionId string, + authToken string, +) (<-chan *events.Envelope, <-chan error) { + return c.firehose(newFirehose( + subscriptionId, + authToken, + )) } // FirehoseWithoutReconnect functions identically to Firehose but without any // reconnect attempts when errors occur. -func (c *Consumer) FirehoseWithoutReconnect(subscriptionId string, authToken string) (<-chan *events.Envelope, <-chan error) { - return c.firehose(subscriptionId, authToken, 0) +func (c *Consumer) FirehoseWithoutReconnect( + subscriptionId string, + authToken string, +) (<-chan *events.Envelope, <-chan error) { + return c.firehose(newFirehose( + subscriptionId, + authToken, + WithRetry(false), + )) +} + +// FilteredFirehose streams a filtered set of envelopes. It has functionality +// similar to Firehose. +func (c *Consumer) FilteredFirehose( + subscriptionId string, + authToken string, + filter EnvelopeFilter, +) (<-chan *events.Envelope, <-chan error) { + return c.firehose(newFirehose( + subscriptionId, + authToken, + WithEnvelopeFilter(filter), + )) } // SetDebugPrinter sets the websocket connection to write debug information to // debugPrinter. 
-func (c *Consumer) SetDebugPrinter(debugPrinter noaa.DebugPrinter) { +func (c *Consumer) SetDebugPrinter(debugPrinter DebugPrinter) { c.debugPrinter = debugPrinter } @@ -124,7 +180,7 @@ func (c *Consumer) onConnectCallback() func() { return c.callback } -func (c *Consumer) tailingLogs(appGuid, authToken string, retries uint) (<-chan *events.LogMessage, <-chan error) { +func (c *Consumer) tailingLogs(appGuid, authToken string, retry bool) (<-chan *events.LogMessage, <-chan error) { outputs := make(chan *events.LogMessage) errors := make(chan error, 1) callback := func(env *events.Envelope) { @@ -137,12 +193,12 @@ func (c *Consumer) tailingLogs(appGuid, authToken string, retries uint) (<-chan go func() { defer close(errors) defer close(outputs) - c.streamAppDataTo(conn, appGuid, authToken, callback, errors, retries) + c.streamAppDataTo(conn, appGuid, authToken, callback, errors, retry) }() return outputs, errors } -func (c *Consumer) runStream(appGuid, authToken string, retries uint) (<-chan *events.Envelope, <-chan error) { +func (c *Consumer) runStream(appGuid, authToken string, retry bool) (<-chan *events.Envelope, <-chan error) { outputs := make(chan *events.Envelope) errors := make(chan error, 1) @@ -154,29 +210,38 @@ func (c *Consumer) runStream(appGuid, authToken string, retries uint) (<-chan *e go func() { defer close(errors) defer close(outputs) - c.streamAppDataTo(conn, appGuid, authToken, callback, errors, retries) + c.streamAppDataTo(conn, appGuid, authToken, callback, errors, retry) }() return outputs, errors } -func (c *Consumer) streamAppDataTo(conn *connection, appGuid, authToken string, callback func(*events.Envelope), errors chan<- error, retries uint) { - streamPath := fmt.Sprintf("/apps/%s/stream", appGuid) - c.retryAction(c.listenAction(conn, streamPath, authToken, callback), errors, retries) +func (c *Consumer) streamAppDataTo(conn *connection, appGuid, authToken string, callback func(*events.Envelope), errors chan<- error, retry bool) { + streamPath := c.streamPathBuilder(appGuid) + if retry { + c.retryAction(c.listenAction(conn, streamPath, authToken, callback), errors) + return + } + err, _ := c.listenAction(conn, streamPath, authToken, callback)() + errors <- err } -func (c *Consumer) firehose(subID, authToken string, retries uint) (<-chan *events.Envelope, <-chan error) { +func (c *Consumer) firehose(options *firehose) (<-chan *events.Envelope, <-chan error) { outputs := make(chan *events.Envelope) errors := make(chan error, 1) callback := func(env *events.Envelope) { outputs <- env } - streamPath := "/firehose/" + subID conn := c.newConn() go func() { defer close(errors) defer close(outputs) - c.retryAction(c.listenAction(conn, streamPath, authToken, callback), errors, retries) + if options.retry { + c.retryAction(c.listenAction(conn, options.streamPath(), options.authToken, callback), errors) + return + } + err, _ := c.listenAction(conn, options.streamPath(), options.authToken, callback)() + errors <- err }() return outputs, errors } @@ -198,6 +263,10 @@ func (c *Consumer) listenForMessages(conn *connection, callback func(*events.Env return nil } + if c.isTimeoutErr(err) { + return noaa_errors.NewRetryError(err) + } + if err != nil { return err } @@ -226,29 +295,72 @@ func (c *Consumer) listenAction(conn *connection, streamPath, authToken string, } } -func (c *Consumer) retryAction(action func() (err error, done bool), errors chan<- error, retries uint) { - reconnectAttempts := uint(0) - +func (c *Consumer) retryAction(action func() (err error, done bool), 
errors chan<- error) { oldConnectCallback := c.onConnectCallback() defer c.SetOnConnectCallback(oldConnectCallback) + context := retryContext{ + sleep: atomic.LoadInt64(&c.minRetryDelay), + count: 0, + } + c.SetOnConnectCallback(func() { - reconnectAttempts = 0 + atomic.StoreInt64(&context.sleep, atomic.LoadInt64(&c.minRetryDelay)) + atomic.StoreInt64(&context.count, 0) if oldConnectCallback != nil { oldConnectCallback() } }) - for ; reconnectAttempts <= retries; reconnectAttempts++ { + for { err, done := action() if done { return } + + if _, ok := err.(noaa_errors.NonRetryError); ok { + c.debugPrinter.Print("WEBSOCKET ERROR", err.Error()) + errors <- err + return + } + + retryCount := atomic.LoadInt64(&context.count) + maxRetryCount := atomic.LoadInt64(&c.maxRetryCount) + if retryCount >= maxRetryCount { + c.debugPrinter.Print("WEBSOCKET ERROR", fmt.Sprintf("Maximum number of retries %d reached", maxRetryCount)) + errors <- ErrMaxRetriesReached + return + } + atomic.StoreInt64(&context.count, retryCount+1) + + if err != nil { + c.debugPrinter.Print("WEBSOCKET ERROR", fmt.Sprintf("%s. Retrying...", err.Error())) + err = noaa_errors.NewRetryError(err) + } + errors <- err - time.Sleep(reconnectTimeout) + + ns := atomic.LoadInt64(&context.sleep) + time.Sleep(time.Duration(ns)) + ns = atomic.AddInt64(&context.sleep, ns) + max := atomic.LoadInt64(&c.maxRetryDelay) + if ns > max { + atomic.StoreInt64(&context.sleep, max) + } } } +func (c *Consumer) isTimeoutErr(err error) bool { + if err == nil { + return false + } + + // This is an unfortunate way to validate this, + // however the error type is `*websocket.netError` + // which is not exported + return strings.Contains(err.Error(), "i/o timeout") +} + func (c *Consumer) newConn() *connection { conn := &connection{} c.connsLock.Lock() @@ -262,7 +374,15 @@ func (c *Consumer) websocketConn(path, authToken string) (*websocket.Conn, error return c.websocketConnNewToken(path) } - var err error + URL, err := url.Parse(c.trafficControllerUrl + path) + if err != nil { + return nil, noaa_errors.NewNonRetryError(err) + } + + if URL.Scheme != "wss" && URL.Scheme != "ws" { + return nil, noaa_errors.NewNonRetryError(fmt.Errorf("Invalid scheme '%s'", URL.Scheme)) + } + ws, httpErr := c.tryWebsocketConnection(path, authToken) if httpErr != nil { err = httpErr.error @@ -300,10 +420,10 @@ func (c *Consumer) establishWebsocketConnection(path, authToken string) (*websoc } func (c *Consumer) tryWebsocketConnection(path, token string) (*websocket.Conn, *httpError) { - header := http.Header{"Origin": []string{"http://localhost"}, "Authorization": []string{token}} + header := http.Header{"Origin": []string{c.trafficControllerUrl}, "Authorization": []string{token}} url := c.trafficControllerUrl + path - c.debugPrinter.Print("WEBSOCKET REQUEST:", + c.debugPrinter.Print("WEBSOCKET REQUEST", "GET "+path+" HTTP/1.1\n"+ "Host: "+c.trafficControllerUrl+"\n"+ "Upgrade: websocket\nConnection: Upgrade\nSec-WebSocket-Version: 13\nSec-WebSocket-Key: [HIDDEN]\n"+ @@ -311,7 +431,7 @@ func (c *Consumer) tryWebsocketConnection(path, token string) (*websocket.Conn, ws, resp, err := c.dialer.Dial(url, header) if resp != nil { - c.debugPrinter.Print("WEBSOCKET RESPONSE:", + c.debugPrinter.Print("WEBSOCKET RESPONSE", resp.Proto+" "+resp.Status+"\n"+ headersString(resp.Header)) } @@ -327,58 +447,12 @@ func (c *Consumer) tryWebsocketConnection(path, token string) (*websocket.Conn, if err != nil { errMsg := "Error dialing trafficcontroller server: %s.\n" + "Please ask your Cloud Foundry 
Operator to check the platform configuration (trafficcontroller is %s)." - httpErr.error = errors.New(fmt.Sprintf(errMsg, err.Error(), c.trafficControllerUrl)) + httpErr.error = fmt.Errorf(errMsg, err.Error(), c.trafficControllerUrl) return nil, httpErr } return ws, nil } -func (c *Consumer) proxyDial(network, addr string) (net.Conn, error) { - targetUrl, err := url.Parse("http://" + addr) - if err != nil { - return nil, err - } - - proxy := c.proxy - if proxy == nil { - proxy = http.ProxyFromEnvironment - } - - proxyUrl, err := proxy(&http.Request{URL: targetUrl}) - if err != nil { - return nil, err - } - if proxyUrl == nil { - return net.Dial(network, addr) - } - - proxyConn, err := net.Dial(network, proxyUrl.Host) - if err != nil { - return nil, err - } - - connectReq := &http.Request{ - Method: "CONNECT", - URL: targetUrl, - Host: targetUrl.Host, - Header: make(http.Header), - } - connectReq.Write(proxyConn) - - connectResp, err := http.ReadResponse(bufio.NewReader(proxyConn), connectReq) - if err != nil { - proxyConn.Close() - return nil, err - } - if connectResp.StatusCode != http.StatusOK { - f := strings.SplitN(connectResp.Status, " ", 2) - proxyConn.Close() - return nil, errors.New(f[1]) - } - - return proxyConn, nil -} - func headersString(header http.Header) string { var result string for name, values := range header { @@ -427,3 +501,12 @@ func (c *connection) closed() bool { defer c.lock.Unlock() return c.isClosed } + +// retryContext is a struct to keep track of a retryAction call's context. We +// use it primarily to guarantee 64-bit byte alignment on 32-bit systems. +// https://golang.org/src/sync/atomic/doc.go?#L50 +type retryContext struct { + // sleep and count must be the first words within this struct to ensure + // 64-bit byte alignment. + sleep, count int64 +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/consumer.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/consumer.go index ebd54c0b..196d0ba9 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/consumer.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/consumer.go @@ -12,26 +12,23 @@ import ( "github.com/cloudfoundry/noaa/consumer/internal" - "github.com/cloudfoundry/noaa" + "fmt" + noaa_errors "github.com/cloudfoundry/noaa/errors" "github.com/gorilla/websocket" ) -const ( - reconnectTimeout = 500 * time.Millisecond - maxRetries uint = 5 -) - var ( // KeepAlive sets the interval between keep-alive messages sent by the client to loggregator. 
KeepAlive = 25 * time.Second - boundaryRegexp = regexp.MustCompile("boundary=(.*)") - ErrNotOK = errors.New("unknown issue when making HTTP request to Loggregator") - ErrNotFound = ErrNotOK // NotFound isn't an accurate description of how this is used; please use ErrNotOK instead - ErrBadResponse = errors.New("bad server response") - ErrBadRequest = errors.New("bad client request") - ErrLostConnection = errors.New("remote server terminated connection unexpectedly") + boundaryRegexp = regexp.MustCompile("boundary=(.*)") + ErrNotOK = errors.New("unknown issue when making HTTP request to Loggregator") + ErrNotFound = ErrNotOK // NotFound isn't an accurate description of how this is used; please use ErrNotOK instead + ErrBadResponse = errors.New("bad server response") + ErrBadRequest = errors.New("bad client request") + ErrLostConnection = errors.New("remote server terminated connection unexpectedly") + ErrMaxRetriesReached = errors.New("maximum number of connection retries reached") ) //go:generate hel --type DebugPrinter --output mock_debug_printer_test.go @@ -41,14 +38,27 @@ type DebugPrinter interface { Print(title, dump string) } +type nullDebugPrinter struct { +} + +func (nullDebugPrinter) Print(title, body string) { +} + +type RecentPathBuilder func(trafficControllerUrl *url.URL, appGuid string, endpoint string) string +type StreamPathBuilder func(appGuid string) string + // Consumer represents the actions that can be performed against trafficcontroller. // See sync.go and async.go for trafficcontroller access methods. type Consumer struct { + // minRetryDelay, maxRetryDelay, and maxRetryCount must be the first words in + // this struct in order to be used atomically by 32-bit systems. + // https://golang.org/src/sync/atomic/doc.go?#L50 + minRetryDelay, maxRetryDelay, maxRetryCount int64 + trafficControllerUrl string idleTimeout time.Duration callback func() callbackLock sync.RWMutex - proxy func(*http.Request) (*url.URL, error) debugPrinter DebugPrinter client *http.Client dialer websocket.Dialer @@ -59,19 +69,62 @@ type Consumer struct { refreshTokens bool refresherMutex sync.RWMutex tokenRefresher TokenRefresher + + recentPathBuilder RecentPathBuilder + streamPathBuilder StreamPathBuilder } // New creates a new consumer to a trafficcontroller. 
func New(trafficControllerUrl string, tlsConfig *tls.Config, proxy func(*http.Request) (*url.URL, error)) *Consumer { - transport := &http.Transport{Proxy: proxy, TLSClientConfig: tlsConfig, TLSHandshakeTimeout: internal.HandshakeTimeout, DisableKeepAlives: true} - consumer := &Consumer{ + if proxy == nil { + proxy = http.ProxyFromEnvironment + } + + return &Consumer{ trafficControllerUrl: trafficControllerUrl, - proxy: proxy, - debugPrinter: noaa.NullDebugPrinter{}, - client: &http.Client{Transport: transport}, + debugPrinter: nullDebugPrinter{}, + client: &http.Client{ + Transport: &http.Transport{ + Proxy: proxy, + TLSClientConfig: tlsConfig, + TLSHandshakeTimeout: internal.Timeout, + DisableKeepAlives: true, + }, + Timeout: internal.Timeout, + }, + minRetryDelay: int64(DefaultMinRetryDelay), + maxRetryDelay: int64(DefaultMaxRetryDelay), + maxRetryCount: int64(DefaultMaxRetryCount), + dialer: websocket.Dialer{ + HandshakeTimeout: internal.Timeout, + Proxy: proxy, + TLSClientConfig: tlsConfig, + }, + recentPathBuilder: defaultRecentPathBuilder, + streamPathBuilder: defaultStreamPathBuilder, } - consumer.dialer = websocket.Dialer{HandshakeTimeout: internal.HandshakeTimeout, NetDial: consumer.proxyDial, TLSClientConfig: tlsConfig} - return consumer +} + +func defaultRecentPathBuilder(trafficControllerUrl *url.URL, appGuid string, endpoint string) string { + scheme := "https" + if trafficControllerUrl.Scheme == "ws" { + scheme = "http" + } + + return fmt.Sprintf("%s://%s/apps/%s/%s", scheme, trafficControllerUrl.Host, appGuid, endpoint) + +} + +func (c *Consumer) SetRecentPathBuilder(b RecentPathBuilder) { + c.recentPathBuilder = b +} + +func defaultStreamPathBuilder(appGuid string) string { + return fmt.Sprintf("/apps/%s/stream", appGuid) +} + +func (c *Consumer) SetStreamPathBuilder(b StreamPathBuilder) { + c.streamPathBuilder = b } type httpError struct { diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/filter.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/filter.go new file mode 100644 index 00000000..8a247298 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/filter.go @@ -0,0 +1,20 @@ +package consumer + +type EnvelopeFilter int + +const ( + LogMessages EnvelopeFilter = iota + Metrics + allEnvelopes +) + +func (f EnvelopeFilter) queryStringParam() string { + switch f { + case LogMessages: + return "filter-type=logs" + case Metrics: + return "filter-type=metrics" + default: + return "" + } +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/firehose.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/firehose.go new file mode 100644 index 00000000..57e93509 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/firehose.go @@ -0,0 +1,44 @@ +package consumer + +type firehose struct { + subscriptionID string + authToken string + retry bool + envelopeFilter EnvelopeFilter +} + +type FirehoseOption func(*firehose) + +func WithRetry(retry bool) FirehoseOption { + return func(f *firehose) { + f.retry = retry + } +} +func WithEnvelopeFilter(filter EnvelopeFilter) FirehoseOption { + return func(f *firehose) { + f.envelopeFilter = filter + } +} + +func newFirehose( + subID string, + authToken string, + opts ...FirehoseOption, +) *firehose { + f := &firehose{ + subscriptionID: subID, + authToken: authToken, + retry: true, + envelopeFilter: allEnvelopes, + } + + for _, o 
:= range opts { + o(f) + } + + return f +} + +func (f *firehose) streamPath() string { + return "/firehose/" + f.subscriptionID + "?" + f.envelopeFilter.queryStringParam() +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/internal/timeout.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/internal/timeout.go index 0ee05981..c8a56b59 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/internal/timeout.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/internal/timeout.go @@ -4,5 +4,5 @@ import "time" var ( // DO NOT USE - HandshakeTimeout = 10 * time.Second + Timeout = 10 * time.Second ) diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/sync.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/sync.go index 5b5391d0..8b216008 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/sync.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/consumer/sync.go @@ -21,64 +21,73 @@ import ( // The noaa.SortRecent function is provided to sort the data returned by // this method. func (c *Consumer) RecentLogs(appGuid string, authToken string) ([]*events.LogMessage, error) { - messages := make([]*events.LogMessage, 0, 200) - callback := func(envelope *events.Envelope) error { - messages = append(messages, envelope.GetLogMessage()) - return nil - } - err := c.readTC(appGuid, authToken, "recentlogs", callback) + envelopes, err := c.readTC(appGuid, authToken, "recentlogs") if err != nil { return nil, err } + messages := make([]*events.LogMessage, 0, 200) + for _, env := range envelopes { + messages = append(messages, env.GetLogMessage()) + } return messages, nil } -// ContainerMetrics connects to trafficcontroller via its 'containermetrics' -// http(s) endpoint and returns the most recent messages for an app. The -// returned metrics will be sorted by InstanceIndex. +// ContainerMetrics is deprecated in favor of ContainerEnvelopes, since +// returning the ContainerMetric type directly hides important +// information, like the timestamp. +// +// The returned values will be the same as ContainerEnvelopes, just with +// the Envelope stripped out. func (c *Consumer) ContainerMetrics(appGuid string, authToken string) ([]*events.ContainerMetric, error) { - messages := make([]*events.ContainerMetric, 0, 200) - callback := func(envelope *events.Envelope) error { - if envelope.GetEventType() == events.Envelope_LogMessage { - return errors.New(fmt.Sprintf("Upstream error: %s", envelope.GetLogMessage().GetMessage())) - } - messages = append(messages, envelope.GetContainerMetric()) - return nil - } - err := c.readTC(appGuid, authToken, "containermetrics", callback) + envelopes, err := c.ContainerEnvelopes(appGuid, authToken) if err != nil { return nil, err } + messages := make([]*events.ContainerMetric, 0, len(envelopes)) + for _, env := range envelopes { + messages = append(messages, env.GetContainerMetric()) + } noaa.SortContainerMetrics(messages) - return messages, err + return messages, nil } -func (c *Consumer) readTC(appGuid string, authToken string, endpoint string, callback func(*events.Envelope) error) error { - trafficControllerUrl, err := url.ParseRequestURI(c.trafficControllerUrl) +// ContainerEnvelopes connects to trafficcontroller via its 'containermetrics' +// http(s) endpoint and returns the most recent dropsonde envelopes for an app. 
+func (c *Consumer) ContainerEnvelopes(appGuid, authToken string) ([]*events.Envelope, error) { + envelopes, err := c.readTC(appGuid, authToken, "containermetrics") if err != nil { - return err + return nil, err } + for _, env := range envelopes { + if env.GetEventType() == events.Envelope_LogMessage { + return nil, errors.New(fmt.Sprintf("Upstream error: %s", env.GetLogMessage().GetMessage())) + } + } + return envelopes, nil +} - scheme := "https" - if trafficControllerUrl.Scheme == "ws" { - scheme = "http" +func (c *Consumer) readTC(appGuid string, authToken string, endpoint string) ([]*events.Envelope, error) { + trafficControllerUrl, err := url.ParseRequestURI(c.trafficControllerUrl) + if err != nil { + return nil, err } - recentPath := fmt.Sprintf("%s://%s/apps/%s/%s", scheme, trafficControllerUrl.Host, appGuid, endpoint) + recentPath := c.recentPathBuilder(trafficControllerUrl, appGuid, endpoint) resp, err := c.requestTC(recentPath, authToken) if err != nil { - return err + return nil, err } defer resp.Body.Close() reader, err := getMultipartReader(resp) if err != nil { - return err + return nil, err } var buffer bytes.Buffer + var envelopes []*events.Envelope for part, loopErr := reader.NextPart(); loopErr == nil; part, loopErr = reader.NextPart() { buffer.Reset() @@ -90,13 +99,10 @@ func (c *Consumer) readTC(appGuid string, authToken string, endpoint string, cal envelope := new(events.Envelope) proto.Unmarshal(buffer.Bytes(), envelope) - err = callback(envelope) - if err != nil { - return err - } + envelopes = append(envelopes, envelope) } - return nil + return envelopes, nil } func (c *Consumer) requestTC(path, authToken string) (*http.Response, error) { diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/debug_printer.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/debug_printer.go deleted file mode 100644 index 76982457..00000000 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/debug_printer.go +++ /dev/null @@ -1,11 +0,0 @@ -package noaa - -type DebugPrinter interface { - Print(title, dump string) -} - -type NullDebugPrinter struct { -} - -func (NullDebugPrinter) Print(title, body string) { -} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/errors/non_retry_error.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/errors/non_retry_error.go new file mode 100644 index 00000000..c4473ca5 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/errors/non_retry_error.go @@ -0,0 +1,22 @@ +package errors + +import "fmt" + +// NonRetryError is a type that noaa uses when it encountered an error, +// and is not going to retry the operation. When errors of this type +// are encountered, they should result in a closed connection. +type NonRetryError struct { + Err error +} + +// NewNonRetryError constructs a NonRetryError from any error. +func NewNonRetryError(err error) NonRetryError { + return NonRetryError{ + Err: err, + } +} + +// Error implements error. 
+func (e NonRetryError) Error() string { + return fmt.Sprintf("Please ask your Cloud Foundry Operator to check the platform configuration: %s", e.Err.Error()) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/errors/retry_error.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/errors/retry_error.go new file mode 100644 index 00000000..46ef3916 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/noaa/errors/retry_error.go @@ -0,0 +1,20 @@ +package errors + +// RetryError is a type that noaa uses when it encountered an error, +// but is going to retry the operation. When errors of this type +// are encountered, they should not result in a closed connection. +type RetryError struct { + Err error +} + +// NewRetryError constructs a RetryError from any error. +func NewRetryError(err error) RetryError { + return RetryError{ + Err: err, + } +} + +// Error implements error. +func (e RetryError) Error() string { + return e.Err.Error() +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/LICENSE b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/LICENSE index 8f71f43f..f433b1a5 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/LICENSE +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/LICENSE @@ -1,3 +1,4 @@ + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -174,29 +175,3 @@ of your accepting any such warranty or additional liability. END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/NOTICE b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/NOTICE new file mode 100644 index 00000000..29ab4afc --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/NOTICE @@ -0,0 +1,15 @@ +sonde-go + +Copyright (c) 2015-Present CloudFoundry.org Foundation, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +Limitations under the License. diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/envelope.pb.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/envelope.pb.go index 5703561d..174a30d5 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/envelope.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/envelope.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: envelope.proto -// DO NOT EDIT! /* Package events is a generated protocol buffer package. @@ -16,8 +15,6 @@ It has these top-level messages: Envelope Error - HttpStart - HttpStop HttpStartStop LogMessage ValueMetric @@ -43,15 +40,17 @@ var _ = math.Inf // This is a compile-time assertion to ensure that this generated file // is compatible with the proto package it is being compiled against. -const _ = proto.GoGoProtoPackageIsVersion1 +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package // / Type of the wrapped event. type Envelope_EventType int32 const ( // Removed Heartbeat at position 1 - Envelope_HttpStart Envelope_EventType = 2 - Envelope_HttpStop Envelope_EventType = 3 + // Removed HttpStart at position 2 + // Removed HttpStop at position 3 Envelope_HttpStartStop Envelope_EventType = 4 Envelope_LogMessage Envelope_EventType = 5 Envelope_ValueMetric Envelope_EventType = 6 @@ -61,8 +60,6 @@ const ( ) var Envelope_EventType_name = map[int32]string{ - 2: "HttpStart", - 3: "HttpStop", 4: "HttpStartStop", 5: "LogMessage", 6: "ValueMetric", @@ -71,8 +68,6 @@ var Envelope_EventType_name = map[int32]string{ 9: "ContainerMetric", } var Envelope_EventType_value = map[string]int32{ - "HttpStart": 2, - "HttpStop": 3, "HttpStartStop": 4, "LogMessage": 5, "ValueMetric": 6, @@ -110,8 +105,8 @@ type Envelope struct { Ip *string `protobuf:"bytes,16,opt,name=ip" json:"ip,omitempty"` Tags map[string]string `protobuf:"bytes,17,rep,name=tags" json:"tags,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` // Removed Heartbeat at position 3 - HttpStart *HttpStart `protobuf:"bytes,4,opt,name=httpStart" json:"httpStart,omitempty"` - HttpStop *HttpStop `protobuf:"bytes,5,opt,name=httpStop" json:"httpStop,omitempty"` + // Removed HttpStart at position 4 + // Removed HttpStop at position 5 HttpStartStop *HttpStartStop `protobuf:"bytes,7,opt,name=httpStartStop" json:"httpStartStop,omitempty"` LogMessage *LogMessage `protobuf:"bytes,8,opt,name=logMessage" json:"logMessage,omitempty"` ValueMetric *ValueMetric `protobuf:"bytes,9,opt,name=valueMetric" json:"valueMetric,omitempty"` @@ -137,7 +132,7 @@ func (m *Envelope) GetEventType() Envelope_EventType { if m != nil && m.EventType != nil { return *m.EventType } - return Envelope_HttpStart + return Envelope_HttpStartStop } func (m *Envelope) GetTimestamp() int64 { @@ -182,20 +177,6 @@ func (m *Envelope) GetTags() map[string]string { return nil } -func (m *Envelope) 
GetHttpStart() *HttpStart { - if m != nil { - return m.HttpStart - } - return nil -} - -func (m *Envelope) GetHttpStop() *HttpStop { - if m != nil { - return m.HttpStop - } - return nil -} - func (m *Envelope) GetHttpStartStop() *HttpStartStop { if m != nil { return m.HttpStartStop @@ -242,17 +223,17 @@ func init() { proto.RegisterType((*Envelope)(nil), "events.Envelope") proto.RegisterEnum("events.Envelope_EventType", Envelope_EventType_name, Envelope_EventType_value) } -func (m *Envelope) Marshal() (data []byte, err error) { +func (m *Envelope) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Envelope) MarshalTo(data []byte) (int, error) { +func (m *Envelope) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -260,179 +241,159 @@ func (m *Envelope) MarshalTo(data []byte) (int, error) { if m.Origin == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("origin") } else { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintEnvelope(data, i, uint64(len(*m.Origin))) - i += copy(data[i:], *m.Origin) + i = encodeVarintEnvelope(dAtA, i, uint64(len(*m.Origin))) + i += copy(dAtA[i:], *m.Origin) } if m.EventType == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("eventType") } else { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintEnvelope(data, i, uint64(*m.EventType)) - } - if m.HttpStart != nil { - data[i] = 0x22 - i++ - i = encodeVarintEnvelope(data, i, uint64(m.HttpStart.Size())) - n1, err := m.HttpStart.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.HttpStop != nil { - data[i] = 0x2a - i++ - i = encodeVarintEnvelope(data, i, uint64(m.HttpStop.Size())) - n2, err := m.HttpStop.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 + i = encodeVarintEnvelope(dAtA, i, uint64(*m.EventType)) } if m.Timestamp != nil { - data[i] = 0x30 + dAtA[i] = 0x30 i++ - i = encodeVarintEnvelope(data, i, uint64(*m.Timestamp)) + i = encodeVarintEnvelope(dAtA, i, uint64(*m.Timestamp)) } if m.HttpStartStop != nil { - data[i] = 0x3a + dAtA[i] = 0x3a i++ - i = encodeVarintEnvelope(data, i, uint64(m.HttpStartStop.Size())) - n3, err := m.HttpStartStop.MarshalTo(data[i:]) + i = encodeVarintEnvelope(dAtA, i, uint64(m.HttpStartStop.Size())) + n1, err := m.HttpStartStop.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n3 + i += n1 } if m.LogMessage != nil { - data[i] = 0x42 + dAtA[i] = 0x42 i++ - i = encodeVarintEnvelope(data, i, uint64(m.LogMessage.Size())) - n4, err := m.LogMessage.MarshalTo(data[i:]) + i = encodeVarintEnvelope(dAtA, i, uint64(m.LogMessage.Size())) + n2, err := m.LogMessage.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n4 + i += n2 } if m.ValueMetric != nil { - data[i] = 0x4a + dAtA[i] = 0x4a i++ - i = encodeVarintEnvelope(data, i, uint64(m.ValueMetric.Size())) - n5, err := m.ValueMetric.MarshalTo(data[i:]) + i = encodeVarintEnvelope(dAtA, i, uint64(m.ValueMetric.Size())) + n3, err := m.ValueMetric.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n5 + i += n3 } if m.CounterEvent != nil { - data[i] = 0x52 + dAtA[i] = 0x52 i++ - i = encodeVarintEnvelope(data, i, uint64(m.CounterEvent.Size())) - n6, err := m.CounterEvent.MarshalTo(data[i:]) + i = encodeVarintEnvelope(dAtA, i, uint64(m.CounterEvent.Size())) + n4, err := m.CounterEvent.MarshalTo(dAtA[i:]) if 
err != nil { return 0, err } - i += n6 + i += n4 } if m.Error != nil { - data[i] = 0x5a + dAtA[i] = 0x5a i++ - i = encodeVarintEnvelope(data, i, uint64(m.Error.Size())) - n7, err := m.Error.MarshalTo(data[i:]) + i = encodeVarintEnvelope(dAtA, i, uint64(m.Error.Size())) + n5, err := m.Error.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n5 } if m.ContainerMetric != nil { - data[i] = 0x62 + dAtA[i] = 0x62 i++ - i = encodeVarintEnvelope(data, i, uint64(m.ContainerMetric.Size())) - n8, err := m.ContainerMetric.MarshalTo(data[i:]) + i = encodeVarintEnvelope(dAtA, i, uint64(m.ContainerMetric.Size())) + n6, err := m.ContainerMetric.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n8 + i += n6 } if m.Deployment != nil { - data[i] = 0x6a + dAtA[i] = 0x6a i++ - i = encodeVarintEnvelope(data, i, uint64(len(*m.Deployment))) - i += copy(data[i:], *m.Deployment) + i = encodeVarintEnvelope(dAtA, i, uint64(len(*m.Deployment))) + i += copy(dAtA[i:], *m.Deployment) } if m.Job != nil { - data[i] = 0x72 + dAtA[i] = 0x72 i++ - i = encodeVarintEnvelope(data, i, uint64(len(*m.Job))) - i += copy(data[i:], *m.Job) + i = encodeVarintEnvelope(dAtA, i, uint64(len(*m.Job))) + i += copy(dAtA[i:], *m.Job) } if m.Index != nil { - data[i] = 0x7a + dAtA[i] = 0x7a i++ - i = encodeVarintEnvelope(data, i, uint64(len(*m.Index))) - i += copy(data[i:], *m.Index) + i = encodeVarintEnvelope(dAtA, i, uint64(len(*m.Index))) + i += copy(dAtA[i:], *m.Index) } if m.Ip != nil { - data[i] = 0x82 + dAtA[i] = 0x82 i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ - i = encodeVarintEnvelope(data, i, uint64(len(*m.Ip))) - i += copy(data[i:], *m.Ip) + i = encodeVarintEnvelope(dAtA, i, uint64(len(*m.Ip))) + i += copy(dAtA[i:], *m.Ip) } if len(m.Tags) > 0 { for k, _ := range m.Tags { - data[i] = 0x8a + dAtA[i] = 0x8a i++ - data[i] = 0x1 + dAtA[i] = 0x1 i++ v := m.Tags[k] mapSize := 1 + len(k) + sovEnvelope(uint64(len(k))) + 1 + len(v) + sovEnvelope(uint64(len(v))) - i = encodeVarintEnvelope(data, i, uint64(mapSize)) - data[i] = 0xa + i = encodeVarintEnvelope(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa i++ - i = encodeVarintEnvelope(data, i, uint64(len(k))) - i += copy(data[i:], k) - data[i] = 0x12 + i = encodeVarintEnvelope(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 i++ - i = encodeVarintEnvelope(data, i, uint64(len(v))) - i += copy(data[i:], v) + i = encodeVarintEnvelope(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) } } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Envelope(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Envelope(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Envelope(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Envelope(dAtA []byte, offset int, v uint32) int { + dAtA[offset] 
= uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintEnvelope(data []byte, offset int, v uint64) int { +func encodeVarintEnvelope(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *Envelope) Size() (n int) { @@ -445,14 +406,6 @@ func (m *Envelope) Size() (n int) { if m.EventType != nil { n += 1 + sovEnvelope(uint64(*m.EventType)) } - if m.HttpStart != nil { - l = m.HttpStart.Size() - n += 1 + l + sovEnvelope(uint64(l)) - } - if m.HttpStop != nil { - l = m.HttpStop.Size() - n += 1 + l + sovEnvelope(uint64(l)) - } if m.Timestamp != nil { n += 1 + sovEnvelope(uint64(*m.Timestamp)) } @@ -523,9 +476,9 @@ func sovEnvelope(x uint64) (n int) { func sozEnvelope(x uint64) (n int) { return sovEnvelope(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *Envelope) Unmarshal(data []byte) error { +func (m *Envelope) Unmarshal(dAtA []byte) error { var hasFields [1]uint64 - l := len(data) + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -537,7 +490,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -565,7 +518,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -580,7 +533,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Origin = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000001) @@ -596,7 +549,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (Envelope_EventType(b) & 0x7F) << shift if b < 0x80 { @@ -605,72 +558,6 @@ func (m *Envelope) Unmarshal(data []byte) error { } m.EventType = &v hasFields[0] |= uint64(0x00000002) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpStart", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEnvelope - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HttpStart == nil { - m.HttpStart = &HttpStart{} - } - if err := m.HttpStart.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field HttpStop", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthEnvelope - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.HttpStop == nil { - m.HttpStop = &HttpStop{} - } - if err := 
m.HttpStop.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex case 6: if wireType != 0 { return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) @@ -683,7 +570,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -703,7 +590,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -720,7 +607,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if m.HttpStartStop == nil { m.HttpStartStop = &HttpStartStop{} } - if err := m.HttpStartStop.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.HttpStartStop.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -736,7 +623,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -753,7 +640,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if m.LogMessage == nil { m.LogMessage = &LogMessage{} } - if err := m.LogMessage.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.LogMessage.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -769,7 +656,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -786,7 +673,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if m.ValueMetric == nil { m.ValueMetric = &ValueMetric{} } - if err := m.ValueMetric.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ValueMetric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -802,7 +689,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -819,7 +706,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if m.CounterEvent == nil { m.CounterEvent = &CounterEvent{} } - if err := m.CounterEvent.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.CounterEvent.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -835,7 +722,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -852,7 +739,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if m.Error == nil { m.Error = &Error{} } - if err := m.Error.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.Error.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -868,7 +755,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -885,7 +772,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if m.ContainerMetric == nil { m.ContainerMetric = &ContainerMetric{} } - if err := m.ContainerMetric.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ContainerMetric.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -901,7 +788,7 @@ func (m *Envelope) 
Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -916,7 +803,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Deployment = &s iNdEx = postIndex case 14: @@ -931,7 +818,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -946,7 +833,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Job = &s iNdEx = postIndex case 15: @@ -961,7 +848,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -976,7 +863,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Index = &s iNdEx = postIndex case 16: @@ -991,7 +878,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1006,7 +893,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Ip = &s iNdEx = postIndex case 17: @@ -1021,7 +908,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1035,94 +922,101 @@ func (m *Envelope) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - var keykey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - keykey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - var stringLenmapkey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLenmapkey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLenmapkey := int(stringLenmapkey) - if intStringLenmapkey < 0 { - return ErrInvalidLengthEnvelope - } - postStringIndexmapkey := iNdEx + intStringLenmapkey - if postStringIndexmapkey > l { - return io.ErrUnexpectedEOF - } - mapkey := string(data[iNdEx:postStringIndexmapkey]) - iNdEx = postStringIndexmapkey - var valuekey uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - valuekey |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } + if m.Tags == nil { + m.Tags = make(map[string]string) } - var stringLenmapvalue uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowEnvelope - } - if iNdEx >= l { - return io.ErrUnexpectedEOF + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } } - b := data[iNdEx] - iNdEx++ - stringLenmapvalue |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthEnvelope + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEnvelope + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthEnvelope + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipEnvelope(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEnvelope + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy } } - intStringLenmapvalue := int(stringLenmapvalue) - if intStringLenmapvalue < 0 { - return ErrInvalidLengthEnvelope - } - postStringIndexmapvalue := iNdEx + intStringLenmapvalue - if postStringIndexmapvalue > l { - return io.ErrUnexpectedEOF - } - mapvalue := string(data[iNdEx:postStringIndexmapvalue]) - iNdEx = postStringIndexmapvalue - if m.Tags == nil { - m.Tags = make(map[string]string) - } m.Tags[mapkey] = mapvalue iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipEnvelope(data[iNdEx:]) + skippy, err := skipEnvelope(dAtA[iNdEx:]) if err != nil { return err } @@ -1132,7 +1026,7 @@ func (m *Envelope) Unmarshal(data []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -1148,8 +1042,8 @@ func (m *Envelope) Unmarshal(data []byte) error { } return nil } -func skipEnvelope(data []byte) (n int, err error) { - l := len(data) +func skipEnvelope(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -1160,7 +1054,7 @@ func skipEnvelope(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1178,7 +1072,7 @@ func skipEnvelope(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -1195,7 +1089,7 @@ func skipEnvelope(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1218,7 +1112,7 @@ func skipEnvelope(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1229,7 +1123,7 @@ func skipEnvelope(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipEnvelope(data[start:]) + next, err := skipEnvelope(dAtA[start:]) if err != nil { return 0, err } @@ -1256,40 +1150,38 @@ var ( func init() { proto.RegisterFile("envelope.proto", fileDescriptorEnvelope) } var fileDescriptorEnvelope = []byte{ - // 559 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x64, 0x52, 0xcd, 0x6e, 0xd3, 0x40, - 0x10, 0x56, 0x92, 0x26, 0xcd, 0x4e, 0x9c, 0xc4, 0x9d, 0x16, 0x58, 0x45, 0xa8, 0x2a, 0xe5, 0xd2, - 0x03, 0xb8, 0x52, 0x25, 0x44, 0x05, 0xe2, 0x40, 0x51, 0x10, 0x07, 0x7a, 0x59, 0x2a, 0xee, 0x8e, - 0xb3, 0x75, 0x0d, 0x89, 0xd7, 0x5a, 0x6f, 0x22, 0xfc, 0x12, 0x3c, 0x17, 0x47, 0x1e, 0x01, 0xf1, - 0x0e, 0xdc, 0x59, 0x8f, 0x7f, 0xd3, 0x1e, 0x56, 0xda, 0x99, 0xef, 0xfb, 0x66, 0x3e, 0xcd, 0x0c, - 0x4c, 0x64, 0xbc, 0x95, 0x2b, 0x95, 0x48, 0x2f, 0xd1, 0xca, 0x28, 0x1c, 0xc8, 0xad, 0x8c, 0x4d, - 0x3a, 0x7b, 0x19, 0x46, 0xe6, 0x6e, 0xb3, 0xf0, 0x02, 0xb5, 0x3e, 0x0f, 0x55, 0xa8, 0xce, 0x09, - 0x5e, 0x6c, 0x6e, 0x29, 0xa2, 0x80, 0x7e, 0x85, 0x6c, 0x06, 0x77, 0xc6, 0x24, 0xe5, 0x9f, 0xad, - 0x54, 0x58, 0x7e, 0x9d, 0xb5, 0x34, 0x3a, 0x0a, 0xca, 0x68, 0x24, 0xb5, 0x56, 0xba, 0x08, 0x4e, - 0xff, 0x0d, 0x60, 0x38, 0x2f, 0x7b, 0xe3, 0x63, 0x18, 0x28, 0x1d, 0x85, 0x51, 0xcc, 0x3b, 0x27, - 0xdd, 0x33, 0x26, 0xca, 0x08, 0x2f, 0x81, 0x91, 0x9f, 0x9b, 0x2c, 0x91, 0xbc, 0x6b, 0xa1, 0xc9, - 0xc5, 0xcc, 0x2b, 0x1c, 0x7a, 0x95, 0xd8, 0x9b, 0x57, 0x0c, 0xd1, 0x90, 0xf1, 0x1c, 0x58, 0x6e, - 0xe9, 0x8b, 0xf1, 0xb5, 0xe1, 0x7b, 0x27, 0x9d, 0xb3, 0xd1, 0xc5, 0x41, 0xa5, 0xfc, 0x54, 0x01, - 0xa2, 0xe1, 0xe0, 0x0b, 0x18, 0x16, 0x81, 0x4a, 0x78, 0x9f, 0xf8, 0xee, 0x2e, 0x5f, 0x25, 0xa2, - 0x66, 0xe0, 0x53, 0x60, 0x26, 0x5a, 0xcb, 0xd4, 0xf8, 0xeb, 0x84, 0x0f, 0x2c, 0xbd, 0x27, 0x9a, - 0x04, 0xbe, 0x85, 0x71, 0x5d, 0x98, 0x0a, 0xee, 0x53, 0xc1, 0x47, 0x0f, 0x0c, 0x50, 0xd5, 0x5d, - 0x2e, 0x5e, 0x00, 0xd8, 0x01, 0x5e, 0xcb, 0x34, 0xf5, 0x43, 0xc9, 0x87, 0xa4, 0xc4, 0x4a, 0xf9, - 0xb9, 0x46, 0x44, 0x8b, 0x85, 0xaf, 0x60, 0xb4, 0xf5, 0x57, 0x1b, 0x79, 0x4d, 0xe3, 0xe6, 0x8c, - 0x44, 0x87, 0x95, 0xe8, 0x6b, 0x03, 0x89, 0x36, 0xcf, 0x8e, 0xd7, 0x09, 0xd4, 0x26, 0x36, 0x52, - 0xd3, 0x0c, 0x39, 0x90, 0xee, 0xa8, 0xd2, 0x7d, 0x68, 0x61, 0x62, 0x87, 0x89, 0xcf, 0xa1, 0x4f, - 0xcb, 0xe4, 0x23, 0x92, 0x8c, 0xeb, 0xa5, 0xe4, 
0x49, 0x51, 0x60, 0xf8, 0x1e, 0xa6, 0x81, 0x8a, - 0x8d, 0x1f, 0xc5, 0x52, 0x97, 0xce, 0x1c, 0xa2, 0x3f, 0x69, 0x3a, 0xec, 0xc0, 0xe2, 0x3e, 0x1f, - 0x8f, 0x01, 0x96, 0x32, 0x59, 0xa9, 0x6c, 0x9d, 0xfb, 0x1b, 0x5b, 0x35, 0x13, 0xad, 0x0c, 0xba, - 0xd0, 0xfb, 0xa6, 0x16, 0x7c, 0x42, 0x40, 0xfe, 0xc5, 0x23, 0xe8, 0x47, 0xf1, 0x52, 0xfe, 0xe0, - 0x53, 0xca, 0x15, 0x01, 0x4e, 0xa0, 0x1b, 0x25, 0xdc, 0xa5, 0x94, 0xfd, 0xa1, 0x07, 0x7b, 0xc6, - 0x0f, 0x53, 0x7e, 0x70, 0xd2, 0xb3, 0x7e, 0x1e, 0xde, 0xd4, 0x8d, 0x05, 0xe7, 0xb1, 0xd1, 0x99, - 0x20, 0xde, 0xec, 0x35, 0xb0, 0x3a, 0x95, 0x37, 0xfd, 0x2e, 0x33, 0x7b, 0xaa, 0xd4, 0xd4, 0x7e, - 0xf3, 0xa6, 0x34, 0x57, 0x7b, 0xa3, 0xd4, 0x94, 0x82, 0x37, 0xdd, 0xcb, 0xce, 0xe9, 0xcf, 0x0e, - 0xb0, 0xfa, 0x40, 0x71, 0x0c, 0xac, 0xde, 0xbd, 0xdb, 0x45, 0x07, 0x86, 0xd5, 0x6d, 0xb9, 0x3d, - 0x3c, 0x80, 0xf1, 0xce, 0x61, 0xb8, 0x7b, 0xd6, 0x36, 0x34, 0x1b, 0x77, 0xfb, 0x38, 0x85, 0x51, - 0x6b, 0x99, 0xee, 0xc0, 0x5a, 0x71, 0xda, 0x5b, 0x72, 0xf7, 0x91, 0x41, 0x9f, 0x96, 0xe0, 0x0e, - 0xf1, 0x10, 0xa6, 0xf7, 0x06, 0xec, 0xb2, 0xab, 0x77, 0xbf, 0xfe, 0x1e, 0x77, 0x7e, 0xdb, 0xf7, - 0xc7, 0x3e, 0x78, 0xa6, 0x74, 0xe8, 0x05, 0x2b, 0xb5, 0x59, 0xde, 0xda, 0x32, 0x4b, 0x9d, 0x79, - 0x4b, 0xad, 0x92, 0x54, 0xd9, 0xa1, 0x95, 0x33, 0xb9, 0x72, 0xa8, 0xf2, 0x47, 0x3f, 0x30, 0x4a, - 0x67, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xca, 0x75, 0x1f, 0xa1, 0x30, 0x04, 0x00, 0x00, + // 522 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0x4f, 0x6f, 0x9b, 0x4e, + 0x10, 0x15, 0x38, 0x76, 0xcc, 0xe0, 0x3f, 0x64, 0x92, 0xdf, 0xaf, 0x2b, 0xab, 0xb2, 0x68, 0x7a, + 0xe1, 0x52, 0x22, 0x59, 0xaa, 0x6a, 0xb5, 0xea, 0xa1, 0xa9, 0x5c, 0xf5, 0xd0, 0x5c, 0x48, 0xd4, + 0x3b, 0x86, 0x0d, 0xa1, 0xc1, 0x2c, 0x5d, 0x16, 0xab, 0x7c, 0xc3, 0x1e, 0xfb, 0x11, 0x2a, 0x7f, + 0x8a, 0x1e, 0x2b, 0x06, 0xb0, 0x71, 0xd4, 0xdb, 0xbc, 0x79, 0xef, 0x31, 0xb3, 0x6f, 0x80, 0x09, + 0x4f, 0xb7, 0x3c, 0x11, 0x19, 0x77, 0x33, 0x29, 0x94, 0xc0, 0x01, 0xdf, 0xf2, 0x54, 0xe5, 0xb3, + 0x57, 0x51, 0xac, 0x1e, 0x8a, 0xb5, 0x1b, 0x88, 0xcd, 0x55, 0x24, 0x22, 0x71, 0x45, 0xf4, 0xba, + 0xb8, 0x27, 0x44, 0x80, 0xaa, 0xda, 0x36, 0x83, 0x07, 0xa5, 0xb2, 0xa6, 0x36, 0x12, 0x11, 0x35, + 0xe5, 0x68, 0xc3, 0x95, 0x8c, 0x83, 0x06, 0x99, 0x5c, 0x4a, 0x21, 0x6b, 0x70, 0xf9, 0xa7, 0x0f, + 0xc3, 0x55, 0x33, 0x1b, 0xff, 0x87, 0x81, 0x90, 0x71, 0x14, 0xa7, 0x4c, 0xb3, 0x75, 0xc7, 0xf0, + 0x1a, 0x84, 0x4b, 0x30, 0x68, 0x9f, 0xbb, 0x32, 0xe3, 0x4c, 0xb7, 0x75, 0x67, 0xb2, 0x98, 0xb9, + 0xf5, 0x86, 0x6e, 0x6b, 0x76, 0x57, 0xad, 0xc2, 0x3b, 0x88, 0xf1, 0x39, 0x18, 0x2a, 0xde, 0xf0, + 0x5c, 0xf9, 0x9b, 0x8c, 0x0d, 0x6c, 0xcd, 0xe9, 0x79, 0x87, 0x06, 0xbe, 0x83, 0x71, 0xb5, 0xf0, + 0xad, 0xf2, 0xa5, 0xba, 0x55, 0x22, 0x63, 0xa7, 0xb6, 0xe6, 0x98, 0x8b, 0xff, 0xda, 0x6f, 0x7f, + 0xee, 0x92, 0xde, 0xb1, 0x16, 0x17, 0x00, 0x89, 0x88, 0x6e, 0x78, 0x9e, 0xfb, 0x11, 0x67, 0x43, + 0x72, 0x62, 0xeb, 0xfc, 0xb2, 0x67, 0xbc, 0x8e, 0x0a, 0x5f, 0x83, 0xb9, 0xf5, 0x93, 0x82, 0xdf, + 0x50, 0x1e, 0xcc, 0x20, 0xd3, 0x79, 0x6b, 0xfa, 0x7a, 0xa0, 0xbc, 0xae, 0x0e, 0x97, 0x30, 0x0a, + 0x44, 0x91, 0x2a, 0x2e, 0xe9, 0x91, 0x0c, 0xc8, 0x77, 0xd1, 0xfa, 0x3e, 0x76, 0x38, 0xef, 0x48, + 0x89, 0x2f, 0xa1, 0x4f, 0x69, 0x33, 0x93, 0x2c, 0xe3, 0x7d, 0x6a, 0x55, 0xd3, 0xab, 0x39, 0xfc, + 0x00, 0xd3, 0x40, 0xa4, 0xca, 0x8f, 0x53, 0x2e, 0x9b, 0xcd, 0x46, 0x24, 0x7f, 0x76, 0x98, 0x70, + 0x44, 0x7b, 0x4f, 0xf5, 0x38, 0x07, 0x08, 0x79, 0x96, 0x88, 0x72, 0x53, 0xed, 0x37, 0xb6, 0x35, + 0xc7, 0xf0, 0x3a, 0x1d, 0xb4, 
0xa0, 0xf7, 0x4d, 0xac, 0xd9, 0x84, 0x88, 0xaa, 0xc4, 0x0b, 0xe8, + 0xc7, 0x69, 0xc8, 0x7f, 0xb0, 0x29, 0xf5, 0x6a, 0x80, 0x13, 0xd0, 0xe3, 0x8c, 0x59, 0xd4, 0xd2, + 0xe3, 0x0c, 0x5d, 0x38, 0x51, 0x7e, 0x94, 0xb3, 0x33, 0xbb, 0xe7, 0x98, 0xff, 0x38, 0xfa, 0x9d, + 0x1f, 0xe5, 0xab, 0x54, 0xc9, 0xd2, 0x23, 0xdd, 0xec, 0x0d, 0x18, 0xfb, 0x56, 0x35, 0xf4, 0x91, + 0x97, 0x4c, 0xab, 0x87, 0x3e, 0xf2, 0xb2, 0x1a, 0x4a, 0xb9, 0x32, 0xbd, 0x1e, 0x4a, 0xe0, 0xad, + 0xbe, 0xd4, 0x2e, 0xbf, 0x83, 0xb1, 0xff, 0x81, 0xf0, 0x0c, 0xc6, 0x47, 0xa7, 0xb7, 0x4e, 0x70, + 0x02, 0x70, 0xb8, 0xa9, 0xd5, 0xc7, 0x29, 0x98, 0x9d, 0x73, 0x59, 0x03, 0xb4, 0x60, 0xd4, 0xbd, + 0x83, 0x75, 0x8a, 0x06, 0xf4, 0x29, 0x66, 0x6b, 0x88, 0xe7, 0x30, 0x7d, 0x12, 0xa1, 0x65, 0x5c, + 0xbf, 0xff, 0xb9, 0x9b, 0x6b, 0xbf, 0x76, 0x73, 0xed, 0xf7, 0x6e, 0xae, 0xc1, 0x0b, 0x21, 0x23, + 0x37, 0x48, 0x44, 0x11, 0xde, 0x8b, 0x22, 0x0d, 0x65, 0xe9, 0x86, 0x52, 0x64, 0xb9, 0x48, 0x43, + 0xde, 0xbc, 0xfa, 0x7a, 0x44, 0x5f, 0xfe, 0xe4, 0x07, 0x4a, 0xc8, 0xf2, 0x6f, 0x00, 0x00, 0x00, + 0xff, 0xff, 0x44, 0xf5, 0x54, 0xc4, 0xb3, 0x03, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/error.pb.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/error.pb.go index 7e47deb8..727417c6 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/error.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/error.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: error.proto -// DO NOT EDIT! package events @@ -55,17 +54,17 @@ func (m *Error) GetMessage() string { func init() { proto.RegisterType((*Error)(nil), "events.Error") } -func (m *Error) Marshal() (data []byte, err error) { +func (m *Error) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *Error) MarshalTo(data []byte) (int, error) { +func (m *Error) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -73,57 +72,57 @@ func (m *Error) MarshalTo(data []byte) (int, error) { if m.Source == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("source") } else { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintError(data, i, uint64(len(*m.Source))) - i += copy(data[i:], *m.Source) + i = encodeVarintError(dAtA, i, uint64(len(*m.Source))) + i += copy(dAtA[i:], *m.Source) } if m.Code == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("code") } else { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintError(data, i, uint64(*m.Code)) + i = encodeVarintError(dAtA, i, uint64(*m.Code)) } if m.Message == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("message") } else { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintError(data, i, uint64(len(*m.Message))) - i += copy(data[i:], *m.Message) + i = encodeVarintError(dAtA, i, uint64(len(*m.Message))) + i += copy(dAtA[i:], *m.Message) } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Error(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = 
uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Error(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Error(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Error(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintError(data []byte, offset int, v uint64) int { +func encodeVarintError(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *Error) Size() (n int) { @@ -159,9 +158,9 @@ func sovError(x uint64) (n int) { func sozError(x uint64) (n int) { return sovError(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *Error) Unmarshal(data []byte) error { +func (m *Error) Unmarshal(dAtA []byte) error { var hasFields [1]uint64 - l := len(data) + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -173,7 +172,7 @@ func (m *Error) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -201,7 +200,7 @@ func (m *Error) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -216,7 +215,7 @@ func (m *Error) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Source = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000001) @@ -232,7 +231,7 @@ func (m *Error) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -253,7 +252,7 @@ func (m *Error) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -268,13 +267,13 @@ func (m *Error) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Message = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000004) default: iNdEx = preIndex - skippy, err := skipError(data[iNdEx:]) + skippy, err := skipError(dAtA[iNdEx:]) if err != nil { return err } @@ -284,7 +283,7 @@ func (m *Error) Unmarshal(data []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -303,8 +302,8 @@ func (m *Error) Unmarshal(data []byte) error { } return nil } -func skipError(data []byte) (n int, err error) { - l := len(data) +func skipError(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -315,7 +314,7 @@ func skipError(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -333,7 +332,7 @@ func skipError(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -350,7 +349,7 @@ func skipError(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -373,7 +372,7 @@ func skipError(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -384,7 +383,7 @@ func skipError(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipError(data[start:]) + next, err := skipError(dAtA[start:]) if err != nil { return 0, err } @@ -411,17 +410,17 @@ var ( func init() { proto.RegisterFile("error.proto", fileDescriptorError) } var fileDescriptorError = []byte{ - // 187 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0x2d, 0x2a, 0xca, + // 192 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x4e, 0x2d, 0x2a, 0xca, 0x2f, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0x2d, 0x4b, 0xcd, 0x2b, 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0xe4, 0xcb, 0xc5, 0xea, 0x0a, 0x32, 0x45, 0x48, 0x8c, 0x8b, 0xad, 0x38, 0xbf, 0xb4, 0x28, 0x39, 0x55, 0x82, 0x51, 0x81, 0x49, 0x83, 0x33, 0x08, 0xca, 0x13, 0x12, 0xe2, 0x62, 0x49, 0xce, 0x4f, 0x49, 0x95, - 0x60, 0x02, 0x8a, 0xb2, 0x06, 0x81, 0xd9, 0x42, 0x12, 0x5c, 0xec, 0xb9, 0xa9, 0xc5, 0xc5, 0x89, - 0xe9, 0xa9, 0x12, 0xcc, 0x60, 0xc5, 0x30, 0xae, 0x93, 0xed, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x80, - 0xf8, 0x01, 0x10, 0x73, 0x29, 0xe6, 0x17, 0xa5, 0xeb, 0x25, 0xe7, 0xe4, 0x97, 0xa6, 0xa4, 0xe5, - 0x97, 0xe6, 0xa5, 0x14, 0x55, 0xea, 0xa5, 0x14, 0xe5, 0x17, 0x14, 0xe7, 0xe7, 0xa5, 0xa4, 0xea, - 0x41, 0x9c, 0xeb, 0xc4, 0x03, 0xb6, 0xdd, 0x2d, 0x31, 0xb9, 0x24, 0xbf, 0xa8, 0x12, 0x10, 0x00, - 0x00, 0xff, 0xff, 0x46, 0xed, 0x44, 0xa1, 0xd2, 0x00, 0x00, 0x00, + 0x60, 0x52, 0x60, 0xd2, 0x60, 0x0d, 0x02, 0xb3, 0x85, 0x24, 0xb8, 0xd8, 0x73, 0x53, 0x8b, 0x8b, + 0x13, 0xd3, 0x53, 0x25, 0x98, 0xc1, 0x8a, 0x61, 0x5c, 0x27, 0xdb, 0x13, 0x8f, 0xe4, 0x18, 0x2f, + 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x91, 0x4b, 0x31, 0xbf, 0x28, 0x5d, 0x2f, 0x39, 0x27, + 0xbf, 0x34, 0x25, 0x2d, 0xbf, 0x34, 0x2f, 0xa5, 0xa8, 0x52, 0x2f, 0xa5, 0x28, 0xbf, 0xa0, 0x38, + 0x3f, 0x2f, 0x25, 0x55, 0x0f, 0xe2, 0x5c, 0x27, 0x1e, 0xb0, 0xed, 0x6e, 0x89, 0xc9, 0x25, 0xf9, + 0x45, 0x95, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x46, 0xed, 0x44, 0xa1, 0xd2, 0x00, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/events_easyjson.go 
b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/events_easyjson.go new file mode 100644 index 00000000..71b815c4 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/events_easyjson.go @@ -0,0 +1,1565 @@ +// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. + +package events + +import ( + json "encoding/json" + easyjson "github.com/mailru/easyjson" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" +) + +// suppress unused package warning +var ( + _ *json.RawMessage + _ *jlexer.Lexer + _ *jwriter.Writer + _ easyjson.Marshaler +) + +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents(in *jlexer.Lexer, out *ValueMetric) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "name": + if in.IsNull() { + in.Skip() + out.Name = nil + } else { + if out.Name == nil { + out.Name = new(string) + } + *out.Name = string(in.String()) + } + case "value": + if in.IsNull() { + in.Skip() + out.Value = nil + } else { + if out.Value == nil { + out.Value = new(float64) + } + *out.Value = float64(in.Float64()) + } + case "unit": + if in.IsNull() { + in.Skip() + out.Unit = nil + } else { + if out.Unit == nil { + out.Unit = new(string) + } + *out.Unit = string(in.String()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents(out *jwriter.Writer, in ValueMetric) { + out.RawByte('{') + first := true + _ = first + if in.Name != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"name\":") + if in.Name == nil { + out.RawString("null") + } else { + out.String(string(*in.Name)) + } + } + if in.Value != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"value\":") + if in.Value == nil { + out.RawString("null") + } else { + out.Float64(float64(*in.Value)) + } + } + if in.Unit != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"unit\":") + if in.Unit == nil { + out.RawString("null") + } else { + out.String(string(*in.Unit)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v ValueMetric) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *ValueMetric) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents1(in *jlexer.Lexer, out *UUID) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "low": + if in.IsNull() { + in.Skip() + out.Low = nil + } else { + if out.Low == nil { + out.Low = new(uint64) + } + *out.Low = uint64(in.Uint64()) + } + case "high": + if in.IsNull() { + in.Skip() + out.High = nil + } else { + if out.High == nil { + out.High = new(uint64) + } + *out.High = uint64(in.Uint64()) + } + default: + 
in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents1(out *jwriter.Writer, in UUID) { + out.RawByte('{') + first := true + _ = first + if in.Low != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"low\":") + if in.Low == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.Low)) + } + } + if in.High != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"high\":") + if in.High == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.High)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v UUID) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents1(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *UUID) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents1(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents2(in *jlexer.Lexer, out *LogMessage) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "message": + if in.IsNull() { + in.Skip() + out.Message = nil + } else { + out.Message = in.Bytes() + } + case "message_type": + if in.IsNull() { + in.Skip() + out.MessageType = nil + } else { + if out.MessageType == nil { + out.MessageType = new(LogMessage_MessageType) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.MessageType).UnmarshalJSON(data)) + } + } + case "timestamp": + if in.IsNull() { + in.Skip() + out.Timestamp = nil + } else { + if out.Timestamp == nil { + out.Timestamp = new(int64) + } + *out.Timestamp = int64(in.Int64()) + } + case "app_id": + if in.IsNull() { + in.Skip() + out.AppId = nil + } else { + if out.AppId == nil { + out.AppId = new(string) + } + *out.AppId = string(in.String()) + } + case "source_type": + if in.IsNull() { + in.Skip() + out.SourceType = nil + } else { + if out.SourceType == nil { + out.SourceType = new(string) + } + *out.SourceType = string(in.String()) + } + case "source_instance": + if in.IsNull() { + in.Skip() + out.SourceInstance = nil + } else { + if out.SourceInstance == nil { + out.SourceInstance = new(string) + } + *out.SourceInstance = string(in.String()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents2(out *jwriter.Writer, in LogMessage) { + out.RawByte('{') + first := true + _ = first + if len(in.Message) != 0 { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"message\":") + out.Base64Bytes(in.Message) + } + if in.MessageType != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"message_type\":") + if in.MessageType == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.MessageType)) + } + } + if in.Timestamp != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"timestamp\":") + if in.Timestamp == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.Timestamp)) + } + } + if in.AppId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"app_id\":") + if in.AppId == nil { + 
out.RawString("null") + } else { + out.String(string(*in.AppId)) + } + } + if in.SourceType != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"source_type\":") + if in.SourceType == nil { + out.RawString("null") + } else { + out.String(string(*in.SourceType)) + } + } + if in.SourceInstance != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"source_instance\":") + if in.SourceInstance == nil { + out.RawString("null") + } else { + out.String(string(*in.SourceInstance)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v LogMessage) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents2(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *LogMessage) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents2(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents3(in *jlexer.Lexer, out *HttpStartStop) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "startTimestamp": + if in.IsNull() { + in.Skip() + out.StartTimestamp = nil + } else { + if out.StartTimestamp == nil { + out.StartTimestamp = new(int64) + } + *out.StartTimestamp = int64(in.Int64()) + } + case "stopTimestamp": + if in.IsNull() { + in.Skip() + out.StopTimestamp = nil + } else { + if out.StopTimestamp == nil { + out.StopTimestamp = new(int64) + } + *out.StopTimestamp = int64(in.Int64()) + } + case "requestId": + if in.IsNull() { + in.Skip() + out.RequestId = nil + } else { + if out.RequestId == nil { + out.RequestId = new(UUID) + } + (*out.RequestId).UnmarshalEasyJSON(in) + } + case "peerType": + if in.IsNull() { + in.Skip() + out.PeerType = nil + } else { + if out.PeerType == nil { + out.PeerType = new(PeerType) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.PeerType).UnmarshalJSON(data)) + } + } + case "method": + if in.IsNull() { + in.Skip() + out.Method = nil + } else { + if out.Method == nil { + out.Method = new(Method) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.Method).UnmarshalJSON(data)) + } + } + case "uri": + if in.IsNull() { + in.Skip() + out.Uri = nil + } else { + if out.Uri == nil { + out.Uri = new(string) + } + *out.Uri = string(in.String()) + } + case "remoteAddress": + if in.IsNull() { + in.Skip() + out.RemoteAddress = nil + } else { + if out.RemoteAddress == nil { + out.RemoteAddress = new(string) + } + *out.RemoteAddress = string(in.String()) + } + case "userAgent": + if in.IsNull() { + in.Skip() + out.UserAgent = nil + } else { + if out.UserAgent == nil { + out.UserAgent = new(string) + } + *out.UserAgent = string(in.String()) + } + case "statusCode": + if in.IsNull() { + in.Skip() + out.StatusCode = nil + } else { + if out.StatusCode == nil { + out.StatusCode = new(int32) + } + *out.StatusCode = int32(in.Int32()) + } + case "contentLength": + if in.IsNull() { + in.Skip() + out.ContentLength = nil + } else { + if out.ContentLength == nil { + out.ContentLength = new(int64) + } + *out.ContentLength = int64(in.Int64()) + } + case "applicationId": + if in.IsNull() { + in.Skip() + out.ApplicationId = nil + } else { + if out.ApplicationId == nil { + out.ApplicationId = new(UUID) + } + (*out.ApplicationId).UnmarshalEasyJSON(in) + } + case 
"instanceIndex": + if in.IsNull() { + in.Skip() + out.InstanceIndex = nil + } else { + if out.InstanceIndex == nil { + out.InstanceIndex = new(int32) + } + *out.InstanceIndex = int32(in.Int32()) + } + case "instanceId": + if in.IsNull() { + in.Skip() + out.InstanceId = nil + } else { + if out.InstanceId == nil { + out.InstanceId = new(string) + } + *out.InstanceId = string(in.String()) + } + case "forwarded": + if in.IsNull() { + in.Skip() + out.Forwarded = nil + } else { + in.Delim('[') + if out.Forwarded == nil { + if !in.IsDelim(']') { + out.Forwarded = make([]string, 0, 4) + } else { + out.Forwarded = []string{} + } + } else { + out.Forwarded = (out.Forwarded)[:0] + } + for !in.IsDelim(']') { + var v4 string + v4 = string(in.String()) + out.Forwarded = append(out.Forwarded, v4) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents3(out *jwriter.Writer, in HttpStartStop) { + out.RawByte('{') + first := true + _ = first + if in.StartTimestamp != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"startTimestamp\":") + if in.StartTimestamp == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.StartTimestamp)) + } + } + if in.StopTimestamp != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"stopTimestamp\":") + if in.StopTimestamp == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.StopTimestamp)) + } + } + if in.RequestId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"requestId\":") + if in.RequestId == nil { + out.RawString("null") + } else { + (*in.RequestId).MarshalEasyJSON(out) + } + } + if in.PeerType != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"peerType\":") + if in.PeerType == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.PeerType)) + } + } + if in.Method != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"method\":") + if in.Method == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.Method)) + } + } + if in.Uri != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"uri\":") + if in.Uri == nil { + out.RawString("null") + } else { + out.String(string(*in.Uri)) + } + } + if in.RemoteAddress != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"remoteAddress\":") + if in.RemoteAddress == nil { + out.RawString("null") + } else { + out.String(string(*in.RemoteAddress)) + } + } + if in.UserAgent != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"userAgent\":") + if in.UserAgent == nil { + out.RawString("null") + } else { + out.String(string(*in.UserAgent)) + } + } + if in.StatusCode != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"statusCode\":") + if in.StatusCode == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.StatusCode)) + } + } + if in.ContentLength != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"contentLength\":") + if in.ContentLength == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.ContentLength)) + } + } + if in.ApplicationId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"applicationId\":") + if in.ApplicationId == nil { + out.RawString("null") + } else { + (*in.ApplicationId).MarshalEasyJSON(out) 
+ } + } + if in.InstanceIndex != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"instanceIndex\":") + if in.InstanceIndex == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.InstanceIndex)) + } + } + if in.InstanceId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"instanceId\":") + if in.InstanceId == nil { + out.RawString("null") + } else { + out.String(string(*in.InstanceId)) + } + } + if len(in.Forwarded) != 0 { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"forwarded\":") + if in.Forwarded == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { + out.RawString("null") + } else { + out.RawByte('[') + for v5, v6 := range in.Forwarded { + if v5 > 0 { + out.RawByte(',') + } + out.String(string(v6)) + } + out.RawByte(']') + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v HttpStartStop) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents3(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *HttpStartStop) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents3(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents4(in *jlexer.Lexer, out *Error) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "source": + if in.IsNull() { + in.Skip() + out.Source = nil + } else { + if out.Source == nil { + out.Source = new(string) + } + *out.Source = string(in.String()) + } + case "code": + if in.IsNull() { + in.Skip() + out.Code = nil + } else { + if out.Code == nil { + out.Code = new(int32) + } + *out.Code = int32(in.Int32()) + } + case "message": + if in.IsNull() { + in.Skip() + out.Message = nil + } else { + if out.Message == nil { + out.Message = new(string) + } + *out.Message = string(in.String()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents4(out *jwriter.Writer, in Error) { + out.RawByte('{') + first := true + _ = first + if in.Source != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"source\":") + if in.Source == nil { + out.RawString("null") + } else { + out.String(string(*in.Source)) + } + } + if in.Code != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"code\":") + if in.Code == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.Code)) + } + } + if in.Message != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"message\":") + if in.Message == nil { + out.RawString("null") + } else { + out.String(string(*in.Message)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v Error) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents4(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *Error) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents4(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents5(in *jlexer.Lexer, out *Envelope) { + isTopLevel := in.IsStart() + if in.IsNull() { + if 
isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "origin": + if in.IsNull() { + in.Skip() + out.Origin = nil + } else { + if out.Origin == nil { + out.Origin = new(string) + } + *out.Origin = string(in.String()) + } + case "eventType": + if in.IsNull() { + in.Skip() + out.EventType = nil + } else { + if out.EventType == nil { + out.EventType = new(Envelope_EventType) + } + if data := in.Raw(); in.Ok() { + in.AddError((*out.EventType).UnmarshalJSON(data)) + } + } + case "timestamp": + if in.IsNull() { + in.Skip() + out.Timestamp = nil + } else { + if out.Timestamp == nil { + out.Timestamp = new(int64) + } + *out.Timestamp = int64(in.Int64()) + } + case "deployment": + if in.IsNull() { + in.Skip() + out.Deployment = nil + } else { + if out.Deployment == nil { + out.Deployment = new(string) + } + *out.Deployment = string(in.String()) + } + case "job": + if in.IsNull() { + in.Skip() + out.Job = nil + } else { + if out.Job == nil { + out.Job = new(string) + } + *out.Job = string(in.String()) + } + case "index": + if in.IsNull() { + in.Skip() + out.Index = nil + } else { + if out.Index == nil { + out.Index = new(string) + } + *out.Index = string(in.String()) + } + case "ip": + if in.IsNull() { + in.Skip() + out.Ip = nil + } else { + if out.Ip == nil { + out.Ip = new(string) + } + *out.Ip = string(in.String()) + } + case "tags": + if in.IsNull() { + in.Skip() + } else { + in.Delim('{') + if !in.IsDelim('}') { + out.Tags = make(map[string]string) + } else { + out.Tags = nil + } + for !in.IsDelim('}') { + key := string(in.String()) + in.WantColon() + var v7 string + v7 = string(in.String()) + (out.Tags)[key] = v7 + in.WantComma() + } + in.Delim('}') + } + case "httpStartStop": + if in.IsNull() { + in.Skip() + out.HttpStartStop = nil + } else { + if out.HttpStartStop == nil { + out.HttpStartStop = new(HttpStartStop) + } + (*out.HttpStartStop).UnmarshalEasyJSON(in) + } + case "logMessage": + if in.IsNull() { + in.Skip() + out.LogMessage = nil + } else { + if out.LogMessage == nil { + out.LogMessage = new(LogMessage) + } + (*out.LogMessage).UnmarshalEasyJSON(in) + } + case "valueMetric": + if in.IsNull() { + in.Skip() + out.ValueMetric = nil + } else { + if out.ValueMetric == nil { + out.ValueMetric = new(ValueMetric) + } + (*out.ValueMetric).UnmarshalEasyJSON(in) + } + case "counterEvent": + if in.IsNull() { + in.Skip() + out.CounterEvent = nil + } else { + if out.CounterEvent == nil { + out.CounterEvent = new(CounterEvent) + } + (*out.CounterEvent).UnmarshalEasyJSON(in) + } + case "error": + if in.IsNull() { + in.Skip() + out.Error = nil + } else { + if out.Error == nil { + out.Error = new(Error) + } + (*out.Error).UnmarshalEasyJSON(in) + } + case "containerMetric": + if in.IsNull() { + in.Skip() + out.ContainerMetric = nil + } else { + if out.ContainerMetric == nil { + out.ContainerMetric = new(ContainerMetric) + } + (*out.ContainerMetric).UnmarshalEasyJSON(in) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents5(out *jwriter.Writer, in Envelope) { + out.RawByte('{') + first := true + _ = first + if in.Origin != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"origin\":") + if in.Origin == nil { + out.RawString("null") + } else { + out.String(string(*in.Origin)) + } + 
} + if in.EventType != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"eventType\":") + if in.EventType == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.EventType)) + } + } + if in.Timestamp != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"timestamp\":") + if in.Timestamp == nil { + out.RawString("null") + } else { + out.Int64(int64(*in.Timestamp)) + } + } + if in.Deployment != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"deployment\":") + if in.Deployment == nil { + out.RawString("null") + } else { + out.String(string(*in.Deployment)) + } + } + if in.Job != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"job\":") + if in.Job == nil { + out.RawString("null") + } else { + out.String(string(*in.Job)) + } + } + if in.Index != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"index\":") + if in.Index == nil { + out.RawString("null") + } else { + out.String(string(*in.Index)) + } + } + if in.Ip != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"ip\":") + if in.Ip == nil { + out.RawString("null") + } else { + out.String(string(*in.Ip)) + } + } + if len(in.Tags) != 0 { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"tags\":") + if in.Tags == nil && (out.Flags&jwriter.NilMapAsEmpty) == 0 { + out.RawString(`null`) + } else { + out.RawByte('{') + v8First := true + for v8Name, v8Value := range in.Tags { + if !v8First { + out.RawByte(',') + } + v8First = false + out.String(string(v8Name)) + out.RawByte(':') + out.String(string(v8Value)) + } + out.RawByte('}') + } + } + if in.HttpStartStop != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"httpStartStop\":") + if in.HttpStartStop == nil { + out.RawString("null") + } else { + (*in.HttpStartStop).MarshalEasyJSON(out) + } + } + if in.LogMessage != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"logMessage\":") + if in.LogMessage == nil { + out.RawString("null") + } else { + (*in.LogMessage).MarshalEasyJSON(out) + } + } + if in.ValueMetric != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"valueMetric\":") + if in.ValueMetric == nil { + out.RawString("null") + } else { + (*in.ValueMetric).MarshalEasyJSON(out) + } + } + if in.CounterEvent != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"counterEvent\":") + if in.CounterEvent == nil { + out.RawString("null") + } else { + (*in.CounterEvent).MarshalEasyJSON(out) + } + } + if in.Error != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"error\":") + if in.Error == nil { + out.RawString("null") + } else { + (*in.Error).MarshalEasyJSON(out) + } + } + if in.ContainerMetric != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"containerMetric\":") + if in.ContainerMetric == nil { + out.RawString("null") + } else { + (*in.ContainerMetric).MarshalEasyJSON(out) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v Envelope) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents5(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *Envelope) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents5(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents6(in 
*jlexer.Lexer, out *CounterEvent) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "name": + if in.IsNull() { + in.Skip() + out.Name = nil + } else { + if out.Name == nil { + out.Name = new(string) + } + *out.Name = string(in.String()) + } + case "delta": + if in.IsNull() { + in.Skip() + out.Delta = nil + } else { + if out.Delta == nil { + out.Delta = new(uint64) + } + *out.Delta = uint64(in.Uint64()) + } + case "total": + if in.IsNull() { + in.Skip() + out.Total = nil + } else { + if out.Total == nil { + out.Total = new(uint64) + } + *out.Total = uint64(in.Uint64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents6(out *jwriter.Writer, in CounterEvent) { + out.RawByte('{') + first := true + _ = first + if in.Name != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"name\":") + if in.Name == nil { + out.RawString("null") + } else { + out.String(string(*in.Name)) + } + } + if in.Delta != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"delta\":") + if in.Delta == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.Delta)) + } + } + if in.Total != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"total\":") + if in.Total == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.Total)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v CounterEvent) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents6(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *CounterEvent) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents6(l, v) +} +func easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents7(in *jlexer.Lexer, out *ContainerMetric) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeString() + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "applicationId": + if in.IsNull() { + in.Skip() + out.ApplicationId = nil + } else { + if out.ApplicationId == nil { + out.ApplicationId = new(string) + } + *out.ApplicationId = string(in.String()) + } + case "instanceIndex": + if in.IsNull() { + in.Skip() + out.InstanceIndex = nil + } else { + if out.InstanceIndex == nil { + out.InstanceIndex = new(int32) + } + *out.InstanceIndex = int32(in.Int32()) + } + case "cpuPercentage": + if in.IsNull() { + in.Skip() + out.CpuPercentage = nil + } else { + if out.CpuPercentage == nil { + out.CpuPercentage = new(float64) + } + *out.CpuPercentage = float64(in.Float64()) + } + case "memoryBytes": + if in.IsNull() { + in.Skip() + out.MemoryBytes = nil + } else { + if out.MemoryBytes == nil { + out.MemoryBytes = new(uint64) + } + *out.MemoryBytes = uint64(in.Uint64()) + } + case "diskBytes": + if in.IsNull() { + in.Skip() + out.DiskBytes = nil + } else { + if out.DiskBytes == nil { + out.DiskBytes = new(uint64) + } + *out.DiskBytes = uint64(in.Uint64()) + } + case "memoryBytesQuota": + if in.IsNull() { + in.Skip() + 
out.MemoryBytesQuota = nil + } else { + if out.MemoryBytesQuota == nil { + out.MemoryBytesQuota = new(uint64) + } + *out.MemoryBytesQuota = uint64(in.Uint64()) + } + case "diskBytesQuota": + if in.IsNull() { + in.Skip() + out.DiskBytesQuota = nil + } else { + if out.DiskBytesQuota == nil { + out.DiskBytesQuota = new(uint64) + } + *out.DiskBytesQuota = uint64(in.Uint64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents7(out *jwriter.Writer, in ContainerMetric) { + out.RawByte('{') + first := true + _ = first + if in.ApplicationId != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"applicationId\":") + if in.ApplicationId == nil { + out.RawString("null") + } else { + out.String(string(*in.ApplicationId)) + } + } + if in.InstanceIndex != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"instanceIndex\":") + if in.InstanceIndex == nil { + out.RawString("null") + } else { + out.Int32(int32(*in.InstanceIndex)) + } + } + if in.CpuPercentage != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"cpuPercentage\":") + if in.CpuPercentage == nil { + out.RawString("null") + } else { + out.Float64(float64(*in.CpuPercentage)) + } + } + if in.MemoryBytes != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"memoryBytes\":") + if in.MemoryBytes == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.MemoryBytes)) + } + } + if in.DiskBytes != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"diskBytes\":") + if in.DiskBytes == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.DiskBytes)) + } + } + if in.MemoryBytesQuota != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"memoryBytesQuota\":") + if in.MemoryBytesQuota == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.MemoryBytesQuota)) + } + } + if in.DiskBytesQuota != nil { + if !first { + out.RawByte(',') + } + first = false + out.RawString("\"diskBytesQuota\":") + if in.DiskBytesQuota == nil { + out.RawString("null") + } else { + out.Uint64(uint64(*in.DiskBytesQuota)) + } + } + out.RawByte('}') +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v ContainerMetric) MarshalEasyJSON(w *jwriter.Writer) { + easyjson692db02bEncodeGithubComCloudfoundrySondeGoEvents7(w, v) +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *ContainerMetric) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjson692db02bDecodeGithubComCloudfoundrySondeGoEvents7(l, v) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/http.pb.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/http.pb.go index 63a56acb..ff88fa0a 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/http.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/http.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: http.proto -// DO NOT EDIT! 
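The regenerated easyjson marshalers above give Envelope, CounterEvent, and ContainerMetric reflection-free JSON encode/decode, while the http.pb.go hunks that follow drop the HttpStart and HttpStop messages so HttpStartStop is the only HTTP event type left. Below is a minimal sketch of exercising the regenerated code end to end; it assumes the canonical (non-vendored) import paths for sonde-go, gogo/protobuf, and github.com/mailru/easyjson, none of which are introduced by this diff.

package main

import (
	"fmt"

	"github.com/cloudfoundry/sonde-go/events"
	"github.com/gogo/protobuf/proto"
	"github.com/mailru/easyjson"
)

func main() {
	// Build a CounterEvent envelope; all scalar fields are pointers in the
	// generated proto2 code, hence the proto.String/proto.Uint64 helpers.
	env := &events.Envelope{
		Origin:    proto.String("gorouter"),
		EventType: events.Envelope_CounterEvent.Enum(),
		CounterEvent: &events.CounterEvent{
			Name:  proto.String("requests"),
			Delta: proto.Uint64(1),
			Total: proto.Uint64(42),
		},
	}

	// The generated MarshalEasyJSON/UnmarshalEasyJSON methods satisfy
	// easyjson.Marshaler and easyjson.Unmarshaler, so the package-level
	// helpers round-trip the envelope without reflection.
	data, err := easyjson.Marshal(env)
	if err != nil {
		panic(err)
	}

	var decoded events.Envelope
	if err := easyjson.Unmarshal(data, &decoded); err != nil {
		panic(err)
	}
	fmt.Printf("%s %s=%d\n",
		decoded.GetOrigin(),
		decoded.GetCounterEvent().GetName(),
		decoded.GetCounterEvent().GetTotal())
}

Because MarshalEasyJSON is generated with a value receiver, both Envelope and *Envelope satisfy easyjson.Marshaler; UnmarshalEasyJSON takes a pointer receiver, so decoding needs the *Envelope as shown.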
package events @@ -214,170 +213,6 @@ func (x *Method) UnmarshalJSON(data []byte) error { } func (Method) EnumDescriptor() ([]byte, []int) { return fileDescriptorHttp, []int{1} } -// / An HttpStart event is emitted when a client sends a request (or immediately when a server receives the request). -type HttpStart struct { - Timestamp *int64 `protobuf:"varint,1,req,name=timestamp" json:"timestamp,omitempty"` - RequestId *UUID `protobuf:"bytes,2,req,name=requestId" json:"requestId,omitempty"` - PeerType *PeerType `protobuf:"varint,3,req,name=peerType,enum=events.PeerType" json:"peerType,omitempty"` - Method *Method `protobuf:"varint,4,req,name=method,enum=events.Method" json:"method,omitempty"` - Uri *string `protobuf:"bytes,5,req,name=uri" json:"uri,omitempty"` - RemoteAddress *string `protobuf:"bytes,6,req,name=remoteAddress" json:"remoteAddress,omitempty"` - UserAgent *string `protobuf:"bytes,7,req,name=userAgent" json:"userAgent,omitempty"` - ParentRequestId *UUID `protobuf:"bytes,8,opt,name=parentRequestId" json:"parentRequestId,omitempty"` - ApplicationId *UUID `protobuf:"bytes,9,opt,name=applicationId" json:"applicationId,omitempty"` - InstanceIndex *int32 `protobuf:"varint,10,opt,name=instanceIndex" json:"instanceIndex,omitempty"` - InstanceId *string `protobuf:"bytes,11,opt,name=instanceId" json:"instanceId,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HttpStart) Reset() { *m = HttpStart{} } -func (m *HttpStart) String() string { return proto.CompactTextString(m) } -func (*HttpStart) ProtoMessage() {} -func (*HttpStart) Descriptor() ([]byte, []int) { return fileDescriptorHttp, []int{0} } - -func (m *HttpStart) GetTimestamp() int64 { - if m != nil && m.Timestamp != nil { - return *m.Timestamp - } - return 0 -} - -func (m *HttpStart) GetRequestId() *UUID { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *HttpStart) GetPeerType() PeerType { - if m != nil && m.PeerType != nil { - return *m.PeerType - } - return PeerType_Client -} - -func (m *HttpStart) GetMethod() Method { - if m != nil && m.Method != nil { - return *m.Method - } - return Method_GET -} - -func (m *HttpStart) GetUri() string { - if m != nil && m.Uri != nil { - return *m.Uri - } - return "" -} - -func (m *HttpStart) GetRemoteAddress() string { - if m != nil && m.RemoteAddress != nil { - return *m.RemoteAddress - } - return "" -} - -func (m *HttpStart) GetUserAgent() string { - if m != nil && m.UserAgent != nil { - return *m.UserAgent - } - return "" -} - -func (m *HttpStart) GetParentRequestId() *UUID { - if m != nil { - return m.ParentRequestId - } - return nil -} - -func (m *HttpStart) GetApplicationId() *UUID { - if m != nil { - return m.ApplicationId - } - return nil -} - -func (m *HttpStart) GetInstanceIndex() int32 { - if m != nil && m.InstanceIndex != nil { - return *m.InstanceIndex - } - return 0 -} - -func (m *HttpStart) GetInstanceId() string { - if m != nil && m.InstanceId != nil { - return *m.InstanceId - } - return "" -} - -// / An HttpStop event is emitted when a client receives a response to its request (or when a server completes its handling and returns a response). 
-type HttpStop struct { - Timestamp *int64 `protobuf:"varint,1,req,name=timestamp" json:"timestamp,omitempty"` - Uri *string `protobuf:"bytes,2,req,name=uri" json:"uri,omitempty"` - RequestId *UUID `protobuf:"bytes,3,req,name=requestId" json:"requestId,omitempty"` - PeerType *PeerType `protobuf:"varint,4,req,name=peerType,enum=events.PeerType" json:"peerType,omitempty"` - StatusCode *int32 `protobuf:"varint,5,req,name=statusCode" json:"statusCode,omitempty"` - ContentLength *int64 `protobuf:"varint,6,req,name=contentLength" json:"contentLength,omitempty"` - ApplicationId *UUID `protobuf:"bytes,7,opt,name=applicationId" json:"applicationId,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HttpStop) Reset() { *m = HttpStop{} } -func (m *HttpStop) String() string { return proto.CompactTextString(m) } -func (*HttpStop) ProtoMessage() {} -func (*HttpStop) Descriptor() ([]byte, []int) { return fileDescriptorHttp, []int{1} } - -func (m *HttpStop) GetTimestamp() int64 { - if m != nil && m.Timestamp != nil { - return *m.Timestamp - } - return 0 -} - -func (m *HttpStop) GetUri() string { - if m != nil && m.Uri != nil { - return *m.Uri - } - return "" -} - -func (m *HttpStop) GetRequestId() *UUID { - if m != nil { - return m.RequestId - } - return nil -} - -func (m *HttpStop) GetPeerType() PeerType { - if m != nil && m.PeerType != nil { - return *m.PeerType - } - return PeerType_Client -} - -func (m *HttpStop) GetStatusCode() int32 { - if m != nil && m.StatusCode != nil { - return *m.StatusCode - } - return 0 -} - -func (m *HttpStop) GetContentLength() int64 { - if m != nil && m.ContentLength != nil { - return *m.ContentLength - } - return 0 -} - -func (m *HttpStop) GetApplicationId() *UUID { - if m != nil { - return m.ApplicationId - } - return nil -} - // / An HttpStartStop event represents the whole lifecycle of an HTTP request. 
type HttpStartStop struct { StartTimestamp *int64 `protobuf:"varint,1,req,name=startTimestamp" json:"startTimestamp,omitempty"` @@ -400,7 +235,7 @@ type HttpStartStop struct { func (m *HttpStartStop) Reset() { *m = HttpStartStop{} } func (m *HttpStartStop) String() string { return proto.CompactTextString(m) } func (*HttpStartStop) ProtoMessage() {} -func (*HttpStartStop) Descriptor() ([]byte, []int) { return fileDescriptorHttp, []int{2} } +func (*HttpStartStop) Descriptor() ([]byte, []int) { return fileDescriptorHttp, []int{0} } func (m *HttpStartStop) GetStartTimestamp() int64 { if m != nil && m.StartTimestamp != nil { @@ -501,211 +336,21 @@ func (m *HttpStartStop) GetForwarded() []string { } func init() { - proto.RegisterType((*HttpStart)(nil), "events.HttpStart") - proto.RegisterType((*HttpStop)(nil), "events.HttpStop") proto.RegisterType((*HttpStartStop)(nil), "events.HttpStartStop") proto.RegisterEnum("events.PeerType", PeerType_name, PeerType_value) proto.RegisterEnum("events.Method", Method_name, Method_value) } -func (m *HttpStart) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HttpStart) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Timestamp == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") - } else { - data[i] = 0x8 - i++ - i = encodeVarintHttp(data, i, uint64(*m.Timestamp)) - } - if m.RequestId == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("requestId") - } else { - data[i] = 0x12 - i++ - i = encodeVarintHttp(data, i, uint64(m.RequestId.Size())) - n1, err := m.RequestId.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PeerType == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("peerType") - } else { - data[i] = 0x18 - i++ - i = encodeVarintHttp(data, i, uint64(*m.PeerType)) - } - if m.Method == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("method") - } else { - data[i] = 0x20 - i++ - i = encodeVarintHttp(data, i, uint64(*m.Method)) - } - if m.Uri == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("uri") - } else { - data[i] = 0x2a - i++ - i = encodeVarintHttp(data, i, uint64(len(*m.Uri))) - i += copy(data[i:], *m.Uri) - } - if m.RemoteAddress == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("remoteAddress") - } else { - data[i] = 0x32 - i++ - i = encodeVarintHttp(data, i, uint64(len(*m.RemoteAddress))) - i += copy(data[i:], *m.RemoteAddress) - } - if m.UserAgent == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("userAgent") - } else { - data[i] = 0x3a - i++ - i = encodeVarintHttp(data, i, uint64(len(*m.UserAgent))) - i += copy(data[i:], *m.UserAgent) - } - if m.ParentRequestId != nil { - data[i] = 0x42 - i++ - i = encodeVarintHttp(data, i, uint64(m.ParentRequestId.Size())) - n2, err := m.ParentRequestId.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.ApplicationId != nil { - data[i] = 0x4a - i++ - i = encodeVarintHttp(data, i, uint64(m.ApplicationId.Size())) - n3, err := m.ApplicationId.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.InstanceIndex != nil { - data[i] = 0x50 - i++ - i = encodeVarintHttp(data, i, uint64(*m.InstanceIndex)) - } - if m.InstanceId != nil { - data[i] = 0x5a - i++ - i = encodeVarintHttp(data, 
i, uint64(len(*m.InstanceId))) - i += copy(data[i:], *m.InstanceId) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HttpStop) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HttpStop) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Timestamp == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") - } else { - data[i] = 0x8 - i++ - i = encodeVarintHttp(data, i, uint64(*m.Timestamp)) - } - if m.Uri == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("uri") - } else { - data[i] = 0x12 - i++ - i = encodeVarintHttp(data, i, uint64(len(*m.Uri))) - i += copy(data[i:], *m.Uri) - } - if m.RequestId == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("requestId") - } else { - data[i] = 0x1a - i++ - i = encodeVarintHttp(data, i, uint64(m.RequestId.Size())) - n4, err := m.RequestId.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.PeerType == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("peerType") - } else { - data[i] = 0x20 - i++ - i = encodeVarintHttp(data, i, uint64(*m.PeerType)) - } - if m.StatusCode == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("statusCode") - } else { - data[i] = 0x28 - i++ - i = encodeVarintHttp(data, i, uint64(*m.StatusCode)) - } - if m.ContentLength == nil { - return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("contentLength") - } else { - data[i] = 0x30 - i++ - i = encodeVarintHttp(data, i, uint64(*m.ContentLength)) - } - if m.ApplicationId != nil { - data[i] = 0x3a - i++ - i = encodeVarintHttp(data, i, uint64(m.ApplicationId.Size())) - n5, err := m.ApplicationId.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n5 - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HttpStartStop) Marshal() (data []byte, err error) { +func (m *HttpStartStop) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *HttpStartStop) MarshalTo(data []byte) (int, error) { +func (m *HttpStartStop) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -713,155 +358,158 @@ func (m *HttpStartStop) MarshalTo(data []byte) (int, error) { if m.StartTimestamp == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("startTimestamp") } else { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintHttp(data, i, uint64(*m.StartTimestamp)) + i = encodeVarintHttp(dAtA, i, uint64(*m.StartTimestamp)) } if m.StopTimestamp == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("stopTimestamp") } else { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintHttp(data, i, uint64(*m.StopTimestamp)) + i = encodeVarintHttp(dAtA, i, uint64(*m.StopTimestamp)) } if m.RequestId == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("requestId") } else { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintHttp(data, i, uint64(m.RequestId.Size())) - n6, err := m.RequestId.MarshalTo(data[i:]) + i = encodeVarintHttp(dAtA, i, uint64(m.RequestId.Size())) + n1, err := 
m.RequestId.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n6 + i += n1 } if m.PeerType == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("peerType") } else { - data[i] = 0x20 + dAtA[i] = 0x20 i++ - i = encodeVarintHttp(data, i, uint64(*m.PeerType)) + i = encodeVarintHttp(dAtA, i, uint64(*m.PeerType)) } if m.Method == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("method") } else { - data[i] = 0x28 + dAtA[i] = 0x28 i++ - i = encodeVarintHttp(data, i, uint64(*m.Method)) + i = encodeVarintHttp(dAtA, i, uint64(*m.Method)) } if m.Uri == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("uri") } else { - data[i] = 0x32 + dAtA[i] = 0x32 i++ - i = encodeVarintHttp(data, i, uint64(len(*m.Uri))) - i += copy(data[i:], *m.Uri) + i = encodeVarintHttp(dAtA, i, uint64(len(*m.Uri))) + i += copy(dAtA[i:], *m.Uri) } if m.RemoteAddress == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("remoteAddress") } else { - data[i] = 0x3a + dAtA[i] = 0x3a i++ - i = encodeVarintHttp(data, i, uint64(len(*m.RemoteAddress))) - i += copy(data[i:], *m.RemoteAddress) + i = encodeVarintHttp(dAtA, i, uint64(len(*m.RemoteAddress))) + i += copy(dAtA[i:], *m.RemoteAddress) } if m.UserAgent == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("userAgent") } else { - data[i] = 0x42 + dAtA[i] = 0x42 i++ - i = encodeVarintHttp(data, i, uint64(len(*m.UserAgent))) - i += copy(data[i:], *m.UserAgent) + i = encodeVarintHttp(dAtA, i, uint64(len(*m.UserAgent))) + i += copy(dAtA[i:], *m.UserAgent) } if m.StatusCode == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("statusCode") } else { - data[i] = 0x48 + dAtA[i] = 0x48 i++ - i = encodeVarintHttp(data, i, uint64(*m.StatusCode)) + i = encodeVarintHttp(dAtA, i, uint64(*m.StatusCode)) } if m.ContentLength == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("contentLength") } else { - data[i] = 0x50 + dAtA[i] = 0x50 i++ - i = encodeVarintHttp(data, i, uint64(*m.ContentLength)) + i = encodeVarintHttp(dAtA, i, uint64(*m.ContentLength)) } if m.ApplicationId != nil { - data[i] = 0x62 + dAtA[i] = 0x62 i++ - i = encodeVarintHttp(data, i, uint64(m.ApplicationId.Size())) - n7, err := m.ApplicationId.MarshalTo(data[i:]) + i = encodeVarintHttp(dAtA, i, uint64(m.ApplicationId.Size())) + n2, err := m.ApplicationId.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n7 + i += n2 } if m.InstanceIndex != nil { - data[i] = 0x68 + dAtA[i] = 0x68 i++ - i = encodeVarintHttp(data, i, uint64(*m.InstanceIndex)) + i = encodeVarintHttp(dAtA, i, uint64(*m.InstanceIndex)) } if m.InstanceId != nil { - data[i] = 0x72 + dAtA[i] = 0x72 i++ - i = encodeVarintHttp(data, i, uint64(len(*m.InstanceId))) - i += copy(data[i:], *m.InstanceId) + i = encodeVarintHttp(dAtA, i, uint64(len(*m.InstanceId))) + i += copy(dAtA[i:], *m.InstanceId) } if len(m.Forwarded) > 0 { for _, s := range m.Forwarded { - data[i] = 0x7a + dAtA[i] = 0x7a i++ l = len(s) for l >= 1<<7 { - data[i] = uint8(uint64(l)&0x7f | 0x80) + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) l >>= 7 i++ } - data[i] = uint8(l) + dAtA[i] = uint8(l) i++ - i += copy(data[i:], s) + i += copy(dAtA[i:], s) } } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Http(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = 
uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Http(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Http(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Http(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintHttp(data []byte, offset int, v uint64) int { +func encodeVarintHttp(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } -func (m *HttpStart) Size() (n int) { +func (m *HttpStartStop) Size() (n int) { var l int _ = l - if m.Timestamp != nil { - n += 1 + sovHttp(uint64(*m.Timestamp)) + if m.StartTimestamp != nil { + n += 1 + sovHttp(uint64(*m.StartTimestamp)) + } + if m.StopTimestamp != nil { + n += 1 + sovHttp(uint64(*m.StopTimestamp)) } if m.RequestId != nil { l = m.RequestId.Size() @@ -885,9 +533,11 @@ func (m *HttpStart) Size() (n int) { l = len(*m.UserAgent) n += 1 + l + sovHttp(uint64(l)) } - if m.ParentRequestId != nil { - l = m.ParentRequestId.Size() - n += 1 + l + sovHttp(uint64(l)) + if m.StatusCode != nil { + n += 1 + sovHttp(uint64(*m.StatusCode)) + } + if m.ContentLength != nil { + n += 1 + sovHttp(uint64(*m.ContentLength)) } if m.ApplicationId != nil { l = m.ApplicationId.Size() @@ -899,99 +549,12 @@ func (m *HttpStart) Size() (n int) { if m.InstanceId != nil { l = len(*m.InstanceId) n += 1 + l + sovHttp(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HttpStop) Size() (n int) { - var l int - _ = l - if m.Timestamp != nil { - n += 1 + sovHttp(uint64(*m.Timestamp)) - } - if m.Uri != nil { - l = len(*m.Uri) - n += 1 + l + sovHttp(uint64(l)) - } - if m.RequestId != nil { - l = m.RequestId.Size() - n += 1 + l + sovHttp(uint64(l)) - } - if m.PeerType != nil { - n += 1 + sovHttp(uint64(*m.PeerType)) - } - if m.StatusCode != nil { - n += 1 + sovHttp(uint64(*m.StatusCode)) - } - if m.ContentLength != nil { - n += 1 + sovHttp(uint64(*m.ContentLength)) - } - if m.ApplicationId != nil { - l = m.ApplicationId.Size() - n += 1 + l + sovHttp(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HttpStartStop) Size() (n int) { - var l int - _ = l - if m.StartTimestamp != nil { - n += 1 + sovHttp(uint64(*m.StartTimestamp)) - } - if m.StopTimestamp != nil { - n += 1 + sovHttp(uint64(*m.StopTimestamp)) - } - if m.RequestId != nil { - l = m.RequestId.Size() - n += 1 + l + sovHttp(uint64(l)) - } - if m.PeerType != nil { - n += 1 + sovHttp(uint64(*m.PeerType)) - } - if m.Method != nil { - n += 1 + sovHttp(uint64(*m.Method)) - } - if m.Uri != nil { - l = len(*m.Uri) - n += 1 + l + sovHttp(uint64(l)) - } - if m.RemoteAddress != nil { - l = len(*m.RemoteAddress) - n += 1 + l + sovHttp(uint64(l)) - } - if m.UserAgent 
!= nil { - l = len(*m.UserAgent) - n += 1 + l + sovHttp(uint64(l)) - } - if m.StatusCode != nil { - n += 1 + sovHttp(uint64(*m.StatusCode)) - } - if m.ContentLength != nil { - n += 1 + sovHttp(uint64(*m.ContentLength)) - } - if m.ApplicationId != nil { - l = m.ApplicationId.Size() - n += 1 + l + sovHttp(uint64(l)) - } - if m.InstanceIndex != nil { - n += 1 + sovHttp(uint64(*m.InstanceIndex)) - } - if m.InstanceId != nil { - l = len(*m.InstanceId) - n += 1 + l + sovHttp(uint64(l)) - } - if len(m.Forwarded) > 0 { - for _, s := range m.Forwarded { - l = len(s) - n += 1 + l + sovHttp(uint64(l)) - } + } + if len(m.Forwarded) > 0 { + for _, s := range m.Forwarded { + l = len(s) + n += 1 + l + sovHttp(uint64(l)) + } } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -1012,640 +575,9 @@ func sovHttp(x uint64) (n int) { func sozHttp(x uint64) (n int) { return sovHttp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *HttpStart) Unmarshal(data []byte) error { - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HttpStart: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HttpStart: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Timestamp = &v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestId == nil { - m.RequestId = &UUID{} - } - if err := m.RequestId.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000002) - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerType", wireType) - } - var v PeerType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (PeerType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PeerType = &v - hasFields[0] |= uint64(0x00000004) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Method", wireType) - } - var v Method - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (Method(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Method 
= &v - hasFields[0] |= uint64(0x00000008) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(data[iNdEx:postIndex]) - m.Uri = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000010) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RemoteAddress", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(data[iNdEx:postIndex]) - m.RemoteAddress = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000020) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field UserAgent", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(data[iNdEx:postIndex]) - m.UserAgent = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000040) - case 8: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ParentRequestId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ParentRequestId == nil { - m.ParentRequestId = &UUID{} - } - if err := m.ParentRequestId.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApplicationId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ApplicationId == nil { - m.ApplicationId = &UUID{} - } - if err := m.ApplicationId.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field InstanceIndex", 
wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.InstanceIndex = &v - case 11: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field InstanceId", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(data[iNdEx:postIndex]) - m.InstanceId = &s - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipHttp(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthHttp - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("requestId") - } - if hasFields[0]&uint64(0x00000004) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("peerType") - } - if hasFields[0]&uint64(0x00000008) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("method") - } - if hasFields[0]&uint64(0x00000010) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("uri") - } - if hasFields[0]&uint64(0x00000020) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("remoteAddress") - } - if hasFields[0]&uint64(0x00000040) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("userAgent") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HttpStop) Unmarshal(data []byte) error { - var hasFields [1]uint64 - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HttpStop: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HttpStop: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Timestamp = &v - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Uri", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return 
io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(data[iNdEx:postIndex]) - m.Uri = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000002) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RequestId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.RequestId == nil { - m.RequestId = &UUID{} - } - if err := m.RequestId.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000004) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field PeerType", wireType) - } - var v PeerType - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (PeerType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.PeerType = &v - hasFields[0] |= uint64(0x00000008) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field StatusCode", wireType) - } - var v int32 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.StatusCode = &v - hasFields[0] |= uint64(0x00000010) - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ContentLength", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.ContentLength = &v - hasFields[0] |= uint64(0x00000020) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ApplicationId", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowHttp - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthHttp - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.ApplicationId == nil { - m.ApplicationId = &UUID{} - } - if err := m.ApplicationId.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipHttp(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthHttp - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("uri") - } - if hasFields[0]&uint64(0x00000004) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("requestId") - } - if hasFields[0]&uint64(0x00000008) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("peerType") - } - if hasFields[0]&uint64(0x00000010) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("statusCode") - } - if hasFields[0]&uint64(0x00000020) == 0 { - return github_com_gogo_protobuf_proto.NewRequiredNotSetError("contentLength") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HttpStartStop) Unmarshal(data []byte) error { +func (m *HttpStartStop) Unmarshal(dAtA []byte) error { var hasFields [1]uint64 - l := len(data) + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -1657,7 +589,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1685,7 +617,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -1706,7 +638,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -1727,7 +659,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1744,7 +676,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if m.RequestId == nil { m.RequestId = &UUID{} } - if err := m.RequestId.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.RequestId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1761,7 +693,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (PeerType(b) & 0x7F) << shift if b < 0x80 { @@ -1782,7 +714,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (Method(b) & 0x7F) << shift if b < 0x80 { @@ -1803,7 +735,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1818,7 +750,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Uri = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000020) @@ -1834,7 +766,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1849,7 +781,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) 
m.RemoteAddress = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000040) @@ -1865,7 +797,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1880,7 +812,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.UserAgent = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000080) @@ -1896,7 +828,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1917,7 +849,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -1938,7 +870,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1955,7 +887,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if m.ApplicationId == nil { m.ApplicationId = &UUID{} } - if err := m.ApplicationId.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.ApplicationId.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -1971,7 +903,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -1991,7 +923,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2006,7 +938,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.InstanceId = &s iNdEx = postIndex case 15: @@ -2021,7 +953,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2036,11 +968,11 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Forwarded = append(m.Forwarded, string(data[iNdEx:postIndex])) + m.Forwarded = append(m.Forwarded, string(dAtA[iNdEx:postIndex])) iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipHttp(data[iNdEx:]) + skippy, err := skipHttp(dAtA[iNdEx:]) if err != nil { return err } @@ -2050,7 +982,7 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -2090,8 +1022,8 @@ func (m *HttpStartStop) Unmarshal(data []byte) error { } return nil } -func skipHttp(data []byte) (n int, err error) { - l := len(data) +func skipHttp(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -2102,7 +1034,7 @@ func skipHttp(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2120,7 +1052,7 @@ func skipHttp(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -2137,7 +1069,7 @@ func skipHttp(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -2160,7 +1092,7 @@ func skipHttp(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -2171,7 +1103,7 @@ func skipHttp(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipHttp(data[start:]) + next, err := skipHttp(dAtA[start:]) if err != nil { return 0, err } @@ -2198,63 +1130,56 @@ var ( func init() { proto.RegisterFile("http.proto", fileDescriptorHttp) } var fileDescriptorHttp = []byte{ - // 913 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xb4, 0x54, 0xcf, 0x72, 0xe3, 0xc4, - 0x13, 0xae, 0xf8, 0xbf, 0xda, 0xb1, 0x3d, 0x99, 0xdd, 0xfd, 0xfd, 0x44, 0x58, 0x42, 0xd6, 0x2c, - 0x61, 0x09, 0x8b, 0xb7, 0x2a, 0x07, 0x4e, 0x5c, 0x64, 0x69, 0x12, 0x0b, 0xd9, 0x92, 0x6a, 0x2c, - 0x67, 0xd9, 0x13, 0xe5, 0xb5, 0x26, 0x8e, 0xab, 0x62, 0x8f, 0xd0, 0x9f, 0x85, 0xbc, 0x04, 0x17, - 0x1e, 0x85, 0x77, 0xa0, 0x38, 0xf2, 0x08, 0x14, 0x4f, 0xc2, 0xcc, 0xc8, 0xb1, 0x63, 0x2f, 0x85, - 0x53, 0x54, 0x71, 0x70, 0x55, 0xf7, 0xd7, 0x5f, 0xf7, 0x74, 0x7f, 0xdd, 0x32, 0xc0, 0x75, 0x9a, - 0x46, 0x9d, 0x28, 0xe6, 0x29, 0xc7, 0x15, 0xf6, 0x8e, 0x2d, 0xd2, 0xe4, 0xf0, 0xcb, 0xe9, 0x2c, - 0xbd, 0xce, 0xde, 0x76, 0x26, 0x7c, 0xfe, 0x6a, 0xca, 0xa7, 0xfc, 0x95, 0x0a, 0xbf, 0xcd, 0xae, - 0x94, 0xa7, 0x1c, 0x65, 0xe5, 0x69, 0x87, 0x90, 0x65, 0xb3, 0x30, 0xb7, 0xdb, 0xbf, 0x14, 0x41, - 0xeb, 0x89, 0x8a, 0xc3, 0x74, 0x1c, 0xa7, 0xf8, 0x29, 0x68, 0xe9, 0x6c, 0xce, 0x92, 0x74, 0x3c, - 0x8f, 0xf4, 0xbd, 0xe3, 0xc2, 0x8b, 0x22, 0x5d, 0x03, 0xf8, 0x14, 0xb4, 0x98, 0x7d, 0x9f, 0x09, - 0xcf, 0x0e, 0xf5, 0x82, 0x88, 0xd6, 0xcf, 0xf6, 0x3b, 0x79, 0x0b, 0x9d, 0xd1, 0xc8, 0xb6, 0xe8, - 0x3a, 0x8c, 0x5f, 0x42, 0x2d, 0x62, 0x2c, 0x0e, 0x6e, 0x23, 0xa6, 0x17, 0x05, 0xb5, 0x79, 0x86, - 0xee, 0xa8, 0xfe, 0x12, 0xa7, 0x2b, 0x06, 0x3e, 0x81, 0xca, 0x9c, 0xa5, 0xd7, 0x3c, 0xd4, 0x4b, - 0x8a, 0xdb, 0xbc, 0xe3, 0x0e, 0x14, 0x4a, 0x97, 0x51, 0x8c, 0xa0, 0x98, 0xc5, 0x33, 0xbd, 0x2c, - 0x48, 0x1a, 0x95, 0x26, 0x7e, 0x0e, 0x8d, 0x98, 0xcd, 0x79, 0xca, 0x8c, 0x30, 0x8c, 0x59, 0x92, - 0xe8, 0x15, 0x15, 0xdb, 0x04, 0xe5, 0x5c, 0x59, 0xc2, 0x62, 0x63, 0x2a, 0x8a, 0xea, 0x55, 0xc5, - 0x58, 0x03, 0xf8, 0x2b, 0x68, 0x45, 0xe3, 0x58, 0x58, 0x74, 0x35, 0x5d, 0xed, 0x78, 0xef, 0xbd, - 0xe9, 0xb6, 0x49, 0xf8, 0x0c, 0x1a, 0xe3, 0x28, 0xba, 0x99, 0x4d, 0xc6, 0xe9, 0x8c, 0x2f, 0x44, - 0x96, 0xf6, 0x37, 0x59, 0x9b, 0x14, 0xd9, 0xef, 0x6c, 0x21, 0xe4, 0x5c, 0x4c, 0x98, 0xbd, 0x08, - 0xd9, 0x8f, 0x3a, 0x88, 0x9c, 0x32, 0xdd, 0x04, 0xf1, 0x11, 0xc0, 0x0a, 0x08, 0xf5, 0xba, 
0xa0, - 0x68, 0xf4, 0x1e, 0xd2, 0xfe, 0xa9, 0x00, 0xb5, 0x7c, 0x6b, 0x3c, 0xda, 0xb1, 0xb4, 0xa5, 0x64, - 0x85, 0xb5, 0x64, 0x1b, 0x6b, 0x2c, 0x3e, 0x7c, 0x8d, 0xa5, 0x9d, 0x6b, 0x14, 0x6d, 0x8b, 0x47, - 0xd3, 0x2c, 0x31, 0x79, 0xc8, 0xd4, 0x96, 0xca, 0xf4, 0x1e, 0x22, 0x87, 0x9f, 0xf0, 0x45, 0x2a, - 0xd2, 0xfb, 0x6c, 0x31, 0x4d, 0xaf, 0xd5, 0xb2, 0x8a, 0x74, 0x13, 0x7c, 0x5f, 0xd6, 0xea, 0x4e, - 0x59, 0xdb, 0x3f, 0x97, 0xa0, 0xb1, 0x3a, 0x63, 0xa5, 0xca, 0x09, 0x34, 0x13, 0xe9, 0x04, 0x5b, - 0xd2, 0x6c, 0xa1, 0xb2, 0xa7, 0x44, 0xf0, 0xd7, 0xb4, 0x42, 0xde, 0xd3, 0x06, 0xf8, 0x1f, 0x6a, - 0xb6, 0x3e, 0xfd, 0xf2, 0x43, 0x4e, 0xbf, 0xf2, 0x0f, 0xa7, 0x5f, 0xdd, 0x79, 0xfa, 0xb5, 0xed, - 0xd3, 0xdf, 0xdc, 0x98, 0xb6, 0x7b, 0x63, 0xf0, 0xa0, 0x8d, 0xed, 0xff, 0x8b, 0x0f, 0xa1, 0xb1, - 0xfb, 0x43, 0x68, 0x6e, 0x7f, 0x08, 0x72, 0xba, 0x2b, 0x1e, 0xff, 0x30, 0x8e, 0x43, 0x16, 0xea, - 0xad, 0xe3, 0xa2, 0x9c, 0x6e, 0x05, 0x9c, 0xb6, 0xa1, 0x76, 0xa7, 0x38, 0x06, 0xa8, 0x98, 0x37, - 0x33, 0xd1, 0x0e, 0xda, 0x93, 0xf6, 0x90, 0xc5, 0xef, 0x58, 0x8c, 0x0a, 0xa7, 0xbf, 0x96, 0xa0, - 0x92, 0x4b, 0x8d, 0xab, 0x50, 0xbc, 0x20, 0x81, 0x88, 0xd7, 0xa0, 0xe4, 0x7b, 0xc3, 0x00, 0x15, - 0x24, 0xe4, 0x8f, 0x02, 0x54, 0x94, 0x29, 0x16, 0xe9, 0x93, 0x80, 0xa0, 0x92, 0x0c, 0xf7, 0x88, - 0x61, 0xa1, 0xb2, 0x0c, 0x1b, 0x66, 0x1f, 0x55, 0xf0, 0x63, 0x40, 0x5d, 0x63, 0x48, 0xfa, 0xb6, - 0x4b, 0xbe, 0x33, 0x3d, 0x37, 0xa0, 0x5e, 0x1f, 0x55, 0x25, 0xb1, 0x6b, 0xbb, 0x16, 0xaa, 0xe1, - 0x3a, 0x54, 0xcd, 0x1e, 0x31, 0x1d, 0xdb, 0x45, 0x1a, 0xde, 0x87, 0x9a, 0x72, 0x3c, 0x51, 0x19, - 0x54, 0xc8, 0x73, 0x5d, 0x62, 0x06, 0xa8, 0x2e, 0x33, 0x4c, 0xcf, 0x7f, 0x83, 0xf6, 0xb1, 0x06, - 0x65, 0x8b, 0x74, 0x47, 0x17, 0xa8, 0x21, 0xcd, 0xbe, 0xd1, 0x25, 0x7d, 0xd4, 0x94, 0x71, 0xf1, - 0x86, 0x83, 0x5a, 0xca, 0xf2, 0x4c, 0x07, 0x21, 0x19, 0x1e, 0x10, 0x7a, 0x41, 0xd0, 0x01, 0x6e, - 0x02, 0x0c, 0x1c, 0xc3, 0x0c, 0xec, 0x4b, 0x3b, 0x78, 0x83, 0x70, 0xee, 0x9b, 0x46, 0x9f, 0xb8, - 0x96, 0x41, 0xd1, 0x23, 0x45, 0x75, 0x4c, 0xd1, 0xdb, 0x63, 0x7c, 0x00, 0x8d, 0x81, 0x43, 0x89, - 0x65, 0x53, 0xf1, 0x32, 0x25, 0xe7, 0xe8, 0x09, 0x6e, 0x41, 0x7d, 0xe0, 0xbc, 0xf6, 0xa8, 0x33, - 0xf4, 0x0d, 0x93, 0xa0, 0xff, 0xc9, 0x37, 0x06, 0xde, 0x25, 0x41, 0xff, 0x97, 0x4d, 0x7a, 0x7e, - 0x60, 0x7b, 0xee, 0x10, 0xe9, 0xb2, 0xaa, 0x47, 0x2d, 0x42, 0x7d, 0x23, 0x30, 0x7b, 0xe8, 0x03, - 0x59, 0x35, 0x37, 0x0f, 0x95, 0x5e, 0xd4, 0x46, 0x1f, 0xca, 0x19, 0x7d, 0xea, 0xf9, 0xe7, 0x72, - 0xfc, 0xa7, 0xb8, 0x01, 0x9a, 0xf4, 0x72, 0xd6, 0x47, 0x52, 0x4c, 0x4a, 0x94, 0x32, 0x47, 0xb9, - 0xed, 0x7b, 0x34, 0x40, 0x1f, 0xab, 0xbd, 0x10, 0x83, 0x0a, 0xce, 0xb1, 0x7c, 0x64, 0xd8, 0xf3, - 0x5e, 0x0f, 0x48, 0xd0, 0xf3, 0x2c, 0xf4, 0x4c, 0x96, 0x50, 0x6d, 0x7d, 0x33, 0x1a, 0xf8, 0xa8, - 0x2d, 0xc3, 0x01, 0xf9, 0x36, 0x58, 0xd2, 0x3f, 0x91, 0x3d, 0x04, 0x54, 0x76, 0xfd, 0xfc, 0xce, - 0x74, 0xd0, 0xa7, 0xb2, 0xe0, 0xc8, 0x55, 0x0f, 0x9d, 0xc8, 0x8c, 0x91, 0xbb, 0xd2, 0xfd, 0xb3, - 0x3c, 0xa6, 0xc4, 0x7c, 0xb1, 0xb4, 0xa5, 0x9c, 0x9f, 0x2b, 0xdb, 0xb7, 0x0c, 0xb1, 0xe9, 0x53, - 0xfc, 0x04, 0x0e, 0x72, 0xfb, 0xbe, 0x50, 0x5f, 0xe0, 0x47, 0xd0, 0xba, 0x24, 0x74, 0x28, 0xe4, - 0x58, 0x2d, 0xfb, 0x65, 0xf7, 0xeb, 0xdf, 0xfe, 0x3c, 0xda, 0xfb, 0x5d, 0xfc, 0xfe, 0x10, 0x3f, - 0x78, 0xc6, 0xe3, 0x69, 0x67, 0x72, 0xc3, 0xb3, 0xf0, 0x8a, 0x67, 0x8b, 0x30, 0xbe, 0xed, 0x84, - 0x31, 0x8f, 0x12, 0x2e, 0xae, 0x7a, 0xf9, 0x55, 0x74, 0xeb, 0xf2, 0x0f, 0xeb, 0x7c, 0x3c, 0x49, - 0x79, 0x7c, 0xfb, 0x57, 0x00, 0x00, 0x00, 0xff, 0xff, 0x40, 0x9d, 0x30, 0x48, 0xd7, 0x07, 0x00, - 0x00, + // 811 
bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x54, 0xdd, 0x72, 0xda, 0x46, + 0x18, 0x1d, 0xc0, 0x60, 0x58, 0x0c, 0xfe, 0xbc, 0x49, 0x5a, 0xd5, 0x4d, 0xa9, 0xe2, 0xa6, 0x2e, + 0x75, 0x53, 0x32, 0xe3, 0xeb, 0xde, 0x08, 0x69, 0x6d, 0x54, 0x84, 0x56, 0xb3, 0x2c, 0x4e, 0x73, + 0xd5, 0x21, 0x68, 0x8d, 0x99, 0x31, 0x5a, 0x55, 0x5a, 0xa5, 0xf5, 0x73, 0xf4, 0x9d, 0x3a, 0xbd, + 0xec, 0x23, 0x74, 0xfc, 0x24, 0x9d, 0x5d, 0x61, 0x6c, 0x9a, 0xbb, 0xf3, 0x9d, 0x73, 0xf6, 0xfb, + 0x39, 0x1a, 0x40, 0xe8, 0x46, 0xa9, 0x74, 0x90, 0x66, 0x52, 0x49, 0xdc, 0x10, 0x1f, 0x45, 0xa2, + 0xf2, 0xe3, 0x1f, 0x97, 0x2b, 0x75, 0x53, 0x7c, 0x18, 0x2c, 0xe4, 0xfa, 0xed, 0x52, 0x2e, 0xe5, + 0x5b, 0x23, 0x7f, 0x28, 0xae, 0x4d, 0x65, 0x0a, 0x83, 0xca, 0x67, 0xc7, 0xa8, 0x28, 0x56, 0x71, + 0x89, 0x4f, 0xfe, 0xdc, 0x43, 0x9d, 0x91, 0x52, 0xe9, 0x54, 0xcd, 0x33, 0x35, 0x55, 0x32, 0xc5, + 0xa7, 0xa8, 0x9b, 0xeb, 0x82, 0xaf, 0xd6, 0x22, 0x57, 0xf3, 0x75, 0x6a, 0x55, 0xec, 0x6a, 0xbf, + 0xc6, 0xfe, 0xc7, 0xe2, 0xd7, 0xa8, 0x93, 0x2b, 0x99, 0x3e, 0xda, 0xaa, 0xc6, 0xb6, 0x4b, 0xe2, + 0x33, 0xd4, 0xca, 0xc4, 0x6f, 0x85, 0xc8, 0x95, 0x1f, 0x5b, 0x35, 0xbb, 0xda, 0x6f, 0x9f, 0x1f, + 0x0c, 0xca, 0xb5, 0x07, 0xb3, 0x99, 0xef, 0xb1, 0x47, 0x19, 0xbf, 0x41, 0xcd, 0x54, 0x88, 0x8c, + 0xdf, 0xa5, 0xc2, 0xda, 0xb3, 0xab, 0xfd, 0xee, 0x39, 0x3c, 0x58, 0xa3, 0x0d, 0xcf, 0xb6, 0x0e, + 0x7c, 0x8a, 0x1a, 0x6b, 0xa1, 0x6e, 0x64, 0x6c, 0xd5, 0x8d, 0xb7, 0xfb, 0xe0, 0x9d, 0x18, 0x96, + 0x6d, 0x54, 0x0c, 0xa8, 0x56, 0x64, 0x2b, 0xab, 0x61, 0x57, 0xfb, 0x2d, 0xa6, 0xa1, 0xde, 0x3c, + 0x13, 0x6b, 0xa9, 0x84, 0x13, 0xc7, 0x99, 0xc8, 0x73, 0x6b, 0xdf, 0x68, 0xbb, 0x24, 0x7e, 0x89, + 0x5a, 0x45, 0x2e, 0x32, 0x67, 0x29, 0x12, 0x65, 0x35, 0x8d, 0xe3, 0x91, 0xc0, 0x3d, 0x84, 0x72, + 0x35, 0x57, 0x45, 0xee, 0xca, 0x58, 0x58, 0x2d, 0xbb, 0xda, 0xaf, 0xb3, 0x27, 0x8c, 0x9e, 0xb1, + 0x90, 0x89, 0x12, 0x89, 0x0a, 0x44, 0xb2, 0x54, 0x37, 0x16, 0x2a, 0xd3, 0xd9, 0x21, 0xf1, 0x39, + 0xea, 0xcc, 0xd3, 0xf4, 0x76, 0xb5, 0x98, 0xab, 0x95, 0x4c, 0xfc, 0xd8, 0x3a, 0xb0, 0x2b, 0x9f, + 0x24, 0xb4, 0x6b, 0xd1, 0x9d, 0x57, 0x49, 0xae, 0xe6, 0xc9, 0x42, 0xf8, 0x49, 0x2c, 0xfe, 0xb0, + 0x3a, 0x76, 0xa5, 0x5f, 0x67, 0xbb, 0xa4, 0xde, 0x6f, 0x4b, 0xc4, 0x56, 0xd7, 0xae, 0xf4, 0x5b, + 0xec, 0x09, 0xa3, 0xaf, 0xbb, 0x96, 0xd9, 0xef, 0xf3, 0x2c, 0x16, 0xb1, 0x75, 0x68, 0xd7, 0xf4, + 0x75, 0x5b, 0xe2, 0xec, 0x04, 0x35, 0x1f, 0x12, 0xc7, 0x08, 0x35, 0xdc, 0xdb, 0x95, 0x48, 0x14, + 0x54, 0x34, 0x9e, 0x8a, 0xec, 0xa3, 0xc8, 0xa0, 0x7a, 0xf6, 0xd7, 0x1e, 0x6a, 0x94, 0x51, 0xe3, + 0x7d, 0x54, 0xbb, 0x24, 0x1c, 0x2a, 0xb8, 0x89, 0xf6, 0x22, 0x3a, 0xe5, 0x50, 0xd5, 0x54, 0x34, + 0xe3, 0x50, 0xd3, 0x4f, 0x3c, 0x12, 0x10, 0x4e, 0x60, 0x4f, 0xcb, 0x23, 0xe2, 0x78, 0x50, 0xd7, + 0xb2, 0xe3, 0x06, 0xd0, 0xc0, 0xcf, 0x11, 0x0c, 0x9d, 0x29, 0x09, 0xfc, 0x90, 0xfc, 0xea, 0xd2, + 0x90, 0x33, 0x1a, 0xc0, 0xbe, 0x36, 0x0e, 0xfd, 0xd0, 0x83, 0x26, 0x6e, 0xa3, 0x7d, 0x77, 0x44, + 0xdc, 0xb1, 0x1f, 0x42, 0x0b, 0x1f, 0xa0, 0xa6, 0x29, 0xe8, 0x8c, 0x03, 0x32, 0x12, 0x0d, 0x43, + 0xe2, 0x72, 0x68, 0xeb, 0x17, 0x2e, 0x8d, 0xde, 0xc3, 0x01, 0x6e, 0xa1, 0xba, 0x47, 0x86, 0xb3, + 0x4b, 0xe8, 0x68, 0x18, 0x38, 0x43, 0x12, 0x40, 0x57, 0xeb, 0x81, 0x1f, 0x8e, 0xe1, 0xd0, 0x20, + 0xea, 0x8e, 0x01, 0xb4, 0x3c, 0x21, 0xec, 0x92, 0xc0, 0x11, 0xee, 0x22, 0x34, 0x19, 0x3b, 0x2e, + 0xf7, 0xaf, 0x7c, 0xfe, 0x1e, 0x70, 0x59, 0xbb, 0x4e, 0x40, 0x42, 0xcf, 0x61, 0xf0, 0xcc, 0x58, + 0xc7, 0x2e, 0x0d, 0xe0, 0x39, 0x3e, 0x42, 0x9d, 0xc9, 0x98, 0x11, 0xcf, 0x67, 0xc4, 
0xe5, 0x8c, + 0x5c, 0xc0, 0x0b, 0x7c, 0x88, 0xda, 0x93, 0xf1, 0x3b, 0xca, 0xc6, 0xd3, 0xc8, 0x71, 0x09, 0x7c, + 0xa6, 0x67, 0x4c, 0xe8, 0x15, 0x81, 0xcf, 0xf5, 0x92, 0x34, 0xe2, 0x3e, 0x0d, 0xa7, 0x60, 0xe9, + 0xae, 0x94, 0x79, 0x84, 0x45, 0x0e, 0x77, 0x47, 0xf0, 0x85, 0xee, 0x5a, 0xc2, 0x63, 0x93, 0x17, + 0xf3, 0xe1, 0x4b, 0x7d, 0x63, 0xc4, 0x68, 0x74, 0xa1, 0xcf, 0x7f, 0x89, 0x3b, 0xa8, 0xa5, 0xab, + 0xd2, 0xf5, 0x95, 0x0e, 0x93, 0x11, 0x93, 0x4c, 0xaf, 0xc4, 0x11, 0x65, 0x1c, 0xbe, 0x36, 0xdf, + 0x85, 0x38, 0xcc, 0x1d, 0x81, 0xad, 0x87, 0x4c, 0x47, 0xf4, 0xdd, 0x84, 0xf0, 0x11, 0xf5, 0xe0, + 0x95, 0x6e, 0x61, 0xd6, 0xfa, 0x79, 0x36, 0x89, 0xe0, 0x44, 0xcb, 0x9c, 0xfc, 0xc2, 0x37, 0xf6, + 0x6f, 0xf4, 0x0e, 0x9c, 0xe9, 0xad, 0x5f, 0x3f, 0xc0, 0x31, 0x7c, 0xab, 0x1b, 0xce, 0x42, 0x33, + 0xe8, 0x54, 0xbf, 0x98, 0x85, 0xdb, 0xdc, 0xbf, 0x2b, 0x35, 0x13, 0x66, 0x7f, 0x83, 0x75, 0x9c, + 0xdf, 0x1b, 0x1c, 0x79, 0x0e, 0x27, 0x70, 0x86, 0x5f, 0xa0, 0xa3, 0x12, 0x3f, 0x0d, 0xea, 0x07, + 0xfc, 0x0c, 0x1d, 0x5e, 0x11, 0x36, 0xf5, 0x69, 0xb8, 0xfd, 0xd8, 0x6f, 0x86, 0x3f, 0xfd, 0x7d, + 0xdf, 0xab, 0xfc, 0x73, 0xdf, 0xab, 0xfc, 0x7b, 0xdf, 0xab, 0xa0, 0x57, 0x32, 0x5b, 0x0e, 0x16, + 0xb7, 0xb2, 0x88, 0xaf, 0x65, 0x91, 0xc4, 0xd9, 0xdd, 0x20, 0xce, 0x64, 0x9a, 0xcb, 0x24, 0x16, + 0x9b, 0x5f, 0xc5, 0xb0, 0xad, 0xff, 0xb0, 0x2e, 0xe6, 0x0b, 0x25, 0xb3, 0xbb, 0xff, 0x02, 0x00, + 0x00, 0xff, 0xff, 0xfb, 0xdc, 0x82, 0x50, 0x10, 0x05, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/log.pb.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/log.pb.go index fd066d4a..c6628519 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/log.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/log.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: log.proto -// DO NOT EDIT! 
package events @@ -115,17 +114,17 @@ func init() { proto.RegisterType((*LogMessage)(nil), "events.LogMessage") proto.RegisterEnum("events.LogMessage_MessageType", LogMessage_MessageType_name, LogMessage_MessageType_value) } -func (m *LogMessage) Marshal() (data []byte, err error) { +func (m *LogMessage) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *LogMessage) MarshalTo(data []byte) (int, error) { +func (m *LogMessage) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -133,74 +132,74 @@ func (m *LogMessage) MarshalTo(data []byte) (int, error) { if m.Message == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("message") } else { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintLog(data, i, uint64(len(m.Message))) - i += copy(data[i:], m.Message) + i = encodeVarintLog(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) } if m.MessageType == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("message_type") } else { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintLog(data, i, uint64(*m.MessageType)) + i = encodeVarintLog(dAtA, i, uint64(*m.MessageType)) } if m.Timestamp == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("timestamp") } else { - data[i] = 0x18 + dAtA[i] = 0x18 i++ - i = encodeVarintLog(data, i, uint64(*m.Timestamp)) + i = encodeVarintLog(dAtA, i, uint64(*m.Timestamp)) } if m.AppId != nil { - data[i] = 0x22 + dAtA[i] = 0x22 i++ - i = encodeVarintLog(data, i, uint64(len(*m.AppId))) - i += copy(data[i:], *m.AppId) + i = encodeVarintLog(dAtA, i, uint64(len(*m.AppId))) + i += copy(dAtA[i:], *m.AppId) } if m.SourceType != nil { - data[i] = 0x2a + dAtA[i] = 0x2a i++ - i = encodeVarintLog(data, i, uint64(len(*m.SourceType))) - i += copy(data[i:], *m.SourceType) + i = encodeVarintLog(dAtA, i, uint64(len(*m.SourceType))) + i += copy(dAtA[i:], *m.SourceType) } if m.SourceInstance != nil { - data[i] = 0x32 + dAtA[i] = 0x32 i++ - i = encodeVarintLog(data, i, uint64(len(*m.SourceInstance))) - i += copy(data[i:], *m.SourceInstance) + i = encodeVarintLog(dAtA, i, uint64(len(*m.SourceInstance))) + i += copy(dAtA[i:], *m.SourceInstance) } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Log(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Log(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Log(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Log(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + 
dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintLog(data []byte, offset int, v uint64) int { +func encodeVarintLog(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *LogMessage) Size() (n int) { @@ -247,9 +246,9 @@ func sovLog(x uint64) (n int) { func sozLog(x uint64) (n int) { return sovLog(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *LogMessage) Unmarshal(data []byte) error { +func (m *LogMessage) Unmarshal(dAtA []byte) error { var hasFields [1]uint64 - l := len(data) + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -261,7 +260,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -289,7 +288,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -303,7 +302,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - m.Message = append(m.Message[:0], data[iNdEx:postIndex]...) + m.Message = append(m.Message[:0], dAtA[iNdEx:postIndex]...) if m.Message == nil { m.Message = []byte{} } @@ -321,7 +320,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (LogMessage_MessageType(b) & 0x7F) << shift if b < 0x80 { @@ -342,7 +341,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int64(b) & 0x7F) << shift if b < 0x80 { @@ -363,7 +362,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -378,7 +377,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.AppId = &s iNdEx = postIndex case 5: @@ -393,7 +392,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -408,7 +407,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.SourceType = &s iNdEx = postIndex case 6: @@ -423,7 +422,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -438,12 +437,12 @@ func (m *LogMessage) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.SourceInstance = &s iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipLog(data[iNdEx:]) + skippy, err := skipLog(dAtA[iNdEx:]) if err != nil { return err } @@ -453,7 +452,7 @@ func (m *LogMessage) Unmarshal(data []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -472,8 +471,8 @@ func (m *LogMessage) Unmarshal(data []byte) error { } return nil } -func skipLog(data []byte) (n int, err error) { - l := len(data) +func skipLog(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -484,7 +483,7 @@ func skipLog(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -502,7 +501,7 @@ func skipLog(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -519,7 +518,7 @@ func skipLog(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -542,7 +541,7 @@ func skipLog(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -553,7 +552,7 @@ func skipLog(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipLog(data[start:]) + next, err := skipLog(dAtA[start:]) if err != nil { return 0, err } @@ -580,23 +579,24 @@ var ( func init() { proto.RegisterFile("log.proto", fileDescriptorLog) } var fileDescriptorLog = []byte{ - // 288 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x4c, 0x50, 0xc1, 0x4a, 0x03, 0x31, - 0x10, 0x65, 0xb7, 0xb6, 0xa5, 0xd3, 0x52, 0x4b, 0x40, 0x08, 0x22, 0xb5, 0xf6, 0x62, 0x2f, 0xa6, - 0xe0, 0xd5, 0x93, 0x05, 0x85, 0x82, 0x22, 0x84, 0x7a, 0x2e, 0xdb, 0x4d, 0x1a, 0x17, 0xba, 0x3b, - 0x21, 0x9b, 0x15, 0xf6, 0x13, 0xfc, 0x33, 0x8f, 0x7e, 0x82, 0xf8, 0x25, 0xa6, 0xd9, 0xc8, 0x7a, - 0x18, 0xf2, 0xde, 0x9b, 0x37, 0x6f, 0x92, 0xc0, 0xe0, 0x80, 0x8a, 0x69, 0x83, 0x16, 0x49, 0x4f, - 0xbe, 0xcb, 0xc2, 0x96, 0xe7, 0x37, 0x2a, 0xb3, 0x6f, 0xd5, 0x8e, 0xa5, 0x98, 0x2f, 0x15, 0x2a, - 0x5c, 0xfa, 0xf6, 0xae, 0xda, 0x7b, 0xe6, 0x89, 0x47, 0xcd, 0xd8, 0xfc, 0x23, 0x06, 0x78, 0x42, - 0xf5, 0x2c, 0xcb, 0x32, 0x51, 0x92, 0x50, 0xe8, 0xe7, 0x0d, 0xa4, 0xd1, 0x2c, 0x5e, 0x8c, 0xf8, - 0x1f, 0x25, 0xf7, 0x30, 0x0a, 0x70, 0x6b, 0x6b, 0x2d, 0x69, 0xec, 0xda, 0xe3, 0xdb, 0x29, 0x6b, - 0xd6, 0xb2, 0x36, 0x83, 0x85, 0x73, 0xe3, 0x5c, 0x7c, 0x98, 0xb7, 0x84, 0x5c, 0xc0, 0xc0, 0x66, - 0x4e, 0xb0, 0x49, 0xae, 0x69, 0xc7, 0xcd, 0x77, 0x78, 0x2b, 0x90, 0x33, 0xe8, 0x25, 0x5a, 0x6f, - 0x33, 0x41, 0x4f, 0x66, 0xd1, 0x62, 0xc0, 0xbb, 0x8e, 0xad, 0x05, 0xb9, 0x84, 0x61, 0x89, 0x95, - 0x49, 0xc3, 0xda, 0xae, 0xef, 0x41, 0x23, 0xf9, 0xd4, 0x6b, 0x38, 0x0d, 0x86, 0xac, 0x70, 0x49, - 0x45, 0x2a, 0x69, 0xcf, 0x9b, 0xc6, 0x8d, 0xbc, 0x0e, 0xea, 0xdc, 0x25, 0xfd, 0xbb, 0x1a, 0xe9, - 0x43, 0xe7, 0xe5, 0x75, 0x33, 0x89, 0x8e, 0xe0, 0x81, 0xf3, 0x49, 0xbc, 0xba, 0xfb, 0xfc, 0x99, - 0x46, 0x5f, 0xae, 0xbe, 0x5d, 0xc1, 0x15, 0x1a, 0xc5, 0xd2, 0x03, 0x56, 0x62, 0x8f, 0x55, 0x21, - 0x4c, 0xcd, 0x84, 0x41, 0x5d, 0x62, 0x21, 0x64, 0x78, 0xf4, 0xea, 0xf8, 0x73, 0x8f, 0x49, 0x6a, - 0xd1, 0xd4, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x4c, 0x0b, 0xe4, 0x8b, 0x01, 0x00, 0x00, + // 294 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x4c, 0x90, 0xd1, 0x4a, 0xc3, 0x30, + 0x14, 0x86, 0x69, 0xe7, 0x36, 0x76, 
0x36, 0xe6, 0x08, 0x08, 0x45, 0xa4, 0xab, 0xbb, 0xb1, 0x37, + 0x66, 0xe0, 0xad, 0x57, 0x0e, 0x14, 0x06, 0x8a, 0x50, 0xe6, 0xf5, 0xc8, 0x9a, 0x2c, 0x16, 0xd6, + 0x9e, 0x90, 0xa4, 0x42, 0x1f, 0xc1, 0x37, 0xf3, 0xd2, 0x47, 0x90, 0x3d, 0x89, 0x2c, 0x8d, 0xd4, + 0xab, 0xfc, 0xff, 0x77, 0x4e, 0xce, 0x9f, 0x1c, 0x18, 0x1d, 0x50, 0x52, 0xa5, 0xd1, 0x22, 0x19, + 0x88, 0x0f, 0x51, 0x59, 0x73, 0x79, 0x2b, 0x0b, 0xfb, 0x5e, 0xef, 0x68, 0x8e, 0xe5, 0x52, 0xa2, + 0xc4, 0xa5, 0x2b, 0xef, 0xea, 0xbd, 0x73, 0xce, 0x38, 0xd5, 0x5e, 0x5b, 0x7c, 0x86, 0x00, 0xcf, + 0x28, 0x5f, 0x84, 0x31, 0x4c, 0x0a, 0x12, 0xc1, 0xb0, 0x6c, 0x65, 0x14, 0x24, 0x61, 0x3a, 0xc9, + 0xfe, 0x2c, 0x79, 0x80, 0x89, 0x97, 0x5b, 0xdb, 0x28, 0x11, 0x85, 0x49, 0x98, 0x4e, 0xef, 0x62, + 0xda, 0xc6, 0xd2, 0x6e, 0x06, 0xf5, 0xe7, 0xa6, 0x51, 0x22, 0x1b, 0x97, 0x9d, 0x21, 0x57, 0x30, + 0xb2, 0x45, 0x29, 0x8c, 0x65, 0xa5, 0x8a, 0x7a, 0x49, 0x98, 0xf6, 0xb2, 0x0e, 0x90, 0x0b, 0x18, + 0x30, 0xa5, 0xb6, 0x05, 0x8f, 0xce, 0x92, 0x20, 0x1d, 0x65, 0x7d, 0xa6, 0xd4, 0x9a, 0x93, 0x39, + 0x8c, 0x0d, 0xd6, 0x3a, 0xf7, 0xb1, 0x7d, 0x57, 0x83, 0x16, 0xb9, 0xa9, 0x37, 0x70, 0xee, 0x1b, + 0x8a, 0xca, 0x58, 0x56, 0xe5, 0x22, 0x1a, 0xb8, 0xa6, 0x69, 0x8b, 0xd7, 0x9e, 0x2e, 0xe6, 0x30, + 0xfe, 0xf7, 0x34, 0x32, 0x84, 0xde, 0xeb, 0xdb, 0x66, 0x16, 0x9c, 0xc4, 0x63, 0x96, 0xcd, 0xc2, + 0xd5, 0xfd, 0xd7, 0x31, 0x0e, 0xbe, 0x8f, 0x71, 0xf0, 0x73, 0x8c, 0x03, 0xb8, 0x46, 0x2d, 0x69, + 0x7e, 0xc0, 0x9a, 0xef, 0xb1, 0xae, 0xb8, 0x6e, 0x28, 0xd7, 0xa8, 0x0c, 0x56, 0x5c, 0xf8, 0x4f, + 0xaf, 0x4e, 0x9b, 0x7b, 0x62, 0xb9, 0x45, 0xdd, 0xfc, 0x06, 0x00, 0x00, 0xff, 0xff, 0xb0, 0x4c, + 0x0b, 0xe4, 0x8b, 0x01, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/metric.pb.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/metric.pb.go index 1820dad8..ff3524be 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/metric.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/metric.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: metric.proto -// DO NOT EDIT! 
package events @@ -157,17 +156,17 @@ func init() { proto.RegisterType((*CounterEvent)(nil), "events.CounterEvent") proto.RegisterType((*ContainerMetric)(nil), "events.ContainerMetric") } -func (m *ValueMetric) Marshal() (data []byte, err error) { +func (m *ValueMetric) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *ValueMetric) MarshalTo(data []byte) (int, error) { +func (m *ValueMetric) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -175,43 +174,43 @@ func (m *ValueMetric) MarshalTo(data []byte) (int, error) { if m.Name == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") } else { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintMetric(data, i, uint64(len(*m.Name))) - i += copy(data[i:], *m.Name) + i = encodeVarintMetric(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) } if m.Value == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("value") } else { - data[i] = 0x11 + dAtA[i] = 0x11 i++ - i = encodeFixed64Metric(data, i, uint64(math.Float64bits(float64(*m.Value)))) + i = encodeFixed64Metric(dAtA, i, uint64(math.Float64bits(float64(*m.Value)))) } if m.Unit == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("unit") } else { - data[i] = 0x1a + dAtA[i] = 0x1a i++ - i = encodeVarintMetric(data, i, uint64(len(*m.Unit))) - i += copy(data[i:], *m.Unit) + i = encodeVarintMetric(dAtA, i, uint64(len(*m.Unit))) + i += copy(dAtA[i:], *m.Unit) } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func (m *CounterEvent) Marshal() (data []byte, err error) { +func (m *CounterEvent) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *CounterEvent) MarshalTo(data []byte) (int, error) { +func (m *CounterEvent) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -219,40 +218,40 @@ func (m *CounterEvent) MarshalTo(data []byte) (int, error) { if m.Name == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("name") } else { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintMetric(data, i, uint64(len(*m.Name))) - i += copy(data[i:], *m.Name) + i = encodeVarintMetric(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) } if m.Delta == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("delta") } else { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintMetric(data, i, uint64(*m.Delta)) + i = encodeVarintMetric(dAtA, i, uint64(*m.Delta)) } if m.Total != nil { - data[i] = 0x18 + dAtA[i] = 0x18 i++ - i = encodeVarintMetric(data, i, uint64(*m.Total)) + i = encodeVarintMetric(dAtA, i, uint64(*m.Total)) } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func (m *ContainerMetric) Marshal() (data []byte, err error) { +func (m *ContainerMetric) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], 
nil + return dAtA[:n], nil } -func (m *ContainerMetric) MarshalTo(data []byte) (int, error) { +func (m *ContainerMetric) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -260,80 +259,80 @@ func (m *ContainerMetric) MarshalTo(data []byte) (int, error) { if m.ApplicationId == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("applicationId") } else { - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintMetric(data, i, uint64(len(*m.ApplicationId))) - i += copy(data[i:], *m.ApplicationId) + i = encodeVarintMetric(dAtA, i, uint64(len(*m.ApplicationId))) + i += copy(dAtA[i:], *m.ApplicationId) } if m.InstanceIndex == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("instanceIndex") } else { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintMetric(data, i, uint64(*m.InstanceIndex)) + i = encodeVarintMetric(dAtA, i, uint64(*m.InstanceIndex)) } if m.CpuPercentage == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("cpuPercentage") } else { - data[i] = 0x19 + dAtA[i] = 0x19 i++ - i = encodeFixed64Metric(data, i, uint64(math.Float64bits(float64(*m.CpuPercentage)))) + i = encodeFixed64Metric(dAtA, i, uint64(math.Float64bits(float64(*m.CpuPercentage)))) } if m.MemoryBytes == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("memoryBytes") } else { - data[i] = 0x20 + dAtA[i] = 0x20 i++ - i = encodeVarintMetric(data, i, uint64(*m.MemoryBytes)) + i = encodeVarintMetric(dAtA, i, uint64(*m.MemoryBytes)) } if m.DiskBytes == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("diskBytes") } else { - data[i] = 0x28 + dAtA[i] = 0x28 i++ - i = encodeVarintMetric(data, i, uint64(*m.DiskBytes)) + i = encodeVarintMetric(dAtA, i, uint64(*m.DiskBytes)) } if m.MemoryBytesQuota != nil { - data[i] = 0x30 + dAtA[i] = 0x30 i++ - i = encodeVarintMetric(data, i, uint64(*m.MemoryBytesQuota)) + i = encodeVarintMetric(dAtA, i, uint64(*m.MemoryBytesQuota)) } if m.DiskBytesQuota != nil { - data[i] = 0x38 + dAtA[i] = 0x38 i++ - i = encodeVarintMetric(data, i, uint64(*m.DiskBytesQuota)) + i = encodeVarintMetric(dAtA, i, uint64(*m.DiskBytesQuota)) } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Metric(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Metric(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Metric(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Metric(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintMetric(data []byte, offset int, v uint64) int { +func encodeVarintMetric(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = 
uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *ValueMetric) Size() (n int) { @@ -419,9 +418,9 @@ func sovMetric(x uint64) (n int) { func sozMetric(x uint64) (n int) { return sovMetric(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *ValueMetric) Unmarshal(data []byte) error { +func (m *ValueMetric) Unmarshal(dAtA []byte) error { var hasFields [1]uint64 - l := len(data) + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -433,7 +432,7 @@ func (m *ValueMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -461,7 +460,7 @@ func (m *ValueMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -476,7 +475,7 @@ func (m *ValueMetric) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Name = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000001) @@ -489,14 +488,14 @@ func (m *ValueMetric) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } iNdEx += 8 - v = uint64(data[iNdEx-8]) - v |= uint64(data[iNdEx-7]) << 8 - v |= uint64(data[iNdEx-6]) << 16 - v |= uint64(data[iNdEx-5]) << 24 - v |= uint64(data[iNdEx-4]) << 32 - v |= uint64(data[iNdEx-3]) << 40 - v |= uint64(data[iNdEx-2]) << 48 - v |= uint64(data[iNdEx-1]) << 56 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 v2 := float64(math.Float64frombits(v)) m.Value = &v2 hasFields[0] |= uint64(0x00000002) @@ -512,7 +511,7 @@ func (m *ValueMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -527,13 +526,13 @@ func (m *ValueMetric) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Unit = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000004) default: iNdEx = preIndex - skippy, err := skipMetric(data[iNdEx:]) + skippy, err := skipMetric(dAtA[iNdEx:]) if err != nil { return err } @@ -543,7 +542,7 @@ func (m *ValueMetric) Unmarshal(data []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -562,9 +561,9 @@ func (m *ValueMetric) Unmarshal(data []byte) error { } return nil } -func (m *CounterEvent) Unmarshal(data []byte) error { +func (m *CounterEvent) Unmarshal(dAtA []byte) error { var hasFields [1]uint64 - l := len(data) + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -576,7 +575,7 @@ func (m *CounterEvent) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -604,7 +603,7 @@ func (m *CounterEvent) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -619,7 +618,7 @@ func (m *CounterEvent) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.Name = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000001) @@ -635,7 +634,7 @@ func (m *CounterEvent) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -656,7 +655,7 @@ func (m *CounterEvent) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -666,7 +665,7 @@ func (m *CounterEvent) Unmarshal(data []byte) error { m.Total = &v default: iNdEx = preIndex - skippy, err := skipMetric(data[iNdEx:]) + skippy, err := skipMetric(dAtA[iNdEx:]) if err != nil { return err } @@ -676,7 +675,7 @@ func (m *CounterEvent) Unmarshal(data []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -692,9 +691,9 @@ func (m *CounterEvent) Unmarshal(data []byte) error { } return nil } -func (m *ContainerMetric) Unmarshal(data []byte) error { +func (m *ContainerMetric) Unmarshal(dAtA []byte) error { var hasFields [1]uint64 - l := len(data) + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -706,7 +705,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -734,7 +733,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ stringLen |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -749,7 +748,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if postIndex > l { return io.ErrUnexpectedEOF } - s := string(data[iNdEx:postIndex]) + s := string(dAtA[iNdEx:postIndex]) m.ApplicationId = &s iNdEx = postIndex hasFields[0] |= uint64(0x00000001) @@ -765,7 +764,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (int32(b) & 0x7F) << shift if b < 0x80 { @@ -783,14 +782,14 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { return io.ErrUnexpectedEOF } iNdEx += 8 - v = uint64(data[iNdEx-8]) - v |= uint64(data[iNdEx-7]) << 8 - v |= uint64(data[iNdEx-6]) << 16 - v |= uint64(data[iNdEx-5]) << 24 - v |= uint64(data[iNdEx-4]) << 32 - v |= uint64(data[iNdEx-3]) << 40 - v |= uint64(data[iNdEx-2]) << 48 - v |= uint64(data[iNdEx-1]) << 56 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 v2 := float64(math.Float64frombits(v)) m.CpuPercentage = &v2 hasFields[0] |= uint64(0x00000004) @@ -806,7 +805,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -827,7 +826,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -848,7 +847,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -868,7 +867,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -878,7 +877,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { m.DiskBytesQuota = &v default: iNdEx = preIndex - skippy, err := skipMetric(data[iNdEx:]) + skippy, err := skipMetric(dAtA[iNdEx:]) if err != nil { return err } @@ -888,7 +887,7 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
iNdEx += skippy } } @@ -913,8 +912,8 @@ func (m *ContainerMetric) Unmarshal(data []byte) error { } return nil } -func skipMetric(data []byte) (n int, err error) { - l := len(data) +func skipMetric(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -925,7 +924,7 @@ func skipMetric(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -943,7 +942,7 @@ func skipMetric(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -960,7 +959,7 @@ func skipMetric(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -983,7 +982,7 @@ func skipMetric(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -994,7 +993,7 @@ func skipMetric(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipMetric(data[start:]) + next, err := skipMetric(dAtA[start:]) if err != nil { return 0, err } @@ -1022,27 +1021,27 @@ func init() { proto.RegisterFile("metric.proto", fileDescriptorMetric) } var fileDescriptorMetric = []byte{ // 357 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x92, 0xd1, 0x4a, 0x23, 0x31, - 0x14, 0x86, 0x99, 0x6e, 0xdb, 0xa5, 0x69, 0xbb, 0xbb, 0x84, 0xbd, 0x18, 0xca, 0xb2, 0x74, 0xcb, - 0x22, 0x22, 0x38, 0x7d, 0x03, 0x2f, 0x5a, 0x14, 0x8a, 0x28, 0x3a, 0x17, 0xde, 0xa7, 0x49, 0x3a, - 0x06, 0x67, 0x92, 0x21, 0x73, 0x52, 0xec, 0x93, 0xf8, 0x4a, 0x5e, 0xfa, 0x08, 0xe2, 0x93, 0x98, - 0x9c, 0x14, 0x6d, 0x15, 0x2f, 0x06, 0xce, 0xff, 0x9d, 0x3f, 0x7f, 0xce, 0x09, 0x43, 0x06, 0x95, - 0x04, 0xab, 0x78, 0x56, 0x5b, 0x03, 0x86, 0x76, 0xe5, 0x5a, 0x6a, 0x68, 0x46, 0xc7, 0x85, 0x82, - 0x5b, 0xb7, 0xcc, 0xb8, 0xa9, 0xa6, 0x85, 0x29, 0xcc, 0x14, 0xdb, 0x4b, 0xb7, 0x42, 0x85, 0x02, - 0xab, 0x78, 0x6c, 0x44, 0x9c, 0x53, 0x22, 0xd6, 0x93, 0x73, 0xd2, 0xbf, 0x61, 0xa5, 0x93, 0x17, - 0x98, 0x4b, 0x29, 0x69, 0x6b, 0x56, 0xc9, 0x34, 0x19, 0xb7, 0x0e, 0x7b, 0x39, 0xd6, 0xf4, 0x37, - 0xe9, 0xac, 0x83, 0x25, 0x6d, 0x79, 0x98, 0xe4, 0x51, 0x04, 0xa7, 0xd3, 0x0a, 0xd2, 0x6f, 0xd1, - 0x19, 0xea, 0xc9, 0x25, 0x19, 0xcc, 0x8d, 0xd3, 0x20, 0xed, 0x69, 0x18, 0xec, 0xab, 0x34, 0x21, - 0x4b, 0x60, 0x98, 0xd6, 0xce, 0xa3, 0x08, 0x14, 0x0c, 0xb0, 0xd2, 0xc7, 0x25, 0x81, 0xa2, 0x98, - 0x3c, 0xb4, 0xc8, 0xcf, 0xb9, 0xd1, 0xc0, 0x94, 0x96, 0x76, 0x3b, 0xe1, 0x7f, 0x32, 0x64, 0x75, - 0x5d, 0x2a, 0xce, 0x40, 0x19, 0xbd, 0x10, 0xdb, 0xf0, 0x7d, 0x18, 0x5c, 0x4a, 0x37, 0xc0, 0x34, - 0x97, 0x0b, 0x2d, 0xe4, 0x3d, 0xde, 0xd6, 0xc9, 0xf7, 0x61, 0x70, 0xf1, 0xda, 0x5d, 0x49, 0xcb, - 0xfd, 0xb4, 0xac, 0x90, 0xb8, 0x4c, 0x92, 0xef, 0x43, 0x3a, 0x26, 0xfd, 0x4a, 0x56, 0xc6, 0x6e, - 0x66, 0x1b, 0x90, 0x4d, 0xda, 0xc6, 0xb9, 0x77, 0x11, 0xfd, 0x43, 0x7a, 0x42, 0x35, 0x77, 0xb1, - 0xdf, 0xc1, 0xfe, 0x3b, 0xa0, 0x47, 0xe4, 0xd7, 0x8e, 0xf9, 0xda, 0xf9, 0xd5, 0xd2, 0x2e, 0xae, - 0xf9, 0x89, 0xd3, 0x03, 0xf2, 0xe3, 0xed, 0x60, 0x74, 0x7e, 0x47, 0xe7, 0x07, 0x3a, 0x3b, 0x79, - 0x7c, 0xf9, 0x9b, 0x3c, 0xf9, 0xef, 0xd9, 0x7f, 0xe4, 0x9f, 0xb1, 0x45, 0xc6, 0x4b, 0xe3, 0xc4, - 0xca, 0x3f, 0xbf, 0xb0, 0x9b, 0x4c, 0x58, 0x53, 0x37, 0xc6, 0x2f, 0x99, 0xc5, 0x5f, 
0x64, 0x36, - 0x8c, 0xcf, 0x77, 0xc6, 0x38, 0xf8, 0x1b, 0x5f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xc6, 0xbe, 0x10, + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xdf, 0xaa, 0x13, 0x31, + 0x10, 0x87, 0xd9, 0xed, 0x1f, 0x69, 0xda, 0xaa, 0x04, 0x2f, 0x96, 0x22, 0x65, 0x2d, 0x22, 0x45, + 0x70, 0xfb, 0x06, 0x5e, 0xb4, 0x28, 0x14, 0x51, 0x74, 0x2f, 0xbc, 0x4f, 0x93, 0xe9, 0x1a, 0xdc, + 0xcd, 0x2c, 0xd9, 0x49, 0x71, 0x9f, 0xc4, 0x57, 0xf2, 0xd2, 0x47, 0x90, 0x3e, 0x89, 0x24, 0x29, + 0xda, 0x9e, 0xc3, 0xb9, 0x9b, 0xdf, 0x37, 0x5f, 0x66, 0x27, 0x4b, 0xd8, 0xac, 0x01, 0xb2, 0x5a, + 0x16, 0xad, 0x45, 0x42, 0x3e, 0x86, 0x13, 0x18, 0xea, 0x16, 0x6f, 0x2a, 0x4d, 0xdf, 0xdc, 0xa1, + 0x90, 0xd8, 0x6c, 0x2a, 0xac, 0x70, 0x13, 0xda, 0x07, 0x77, 0x0c, 0x29, 0x84, 0x50, 0xc5, 0x63, + 0x0b, 0xe6, 0x9c, 0x56, 0xb1, 0x5e, 0x7d, 0x60, 0xd3, 0xaf, 0xa2, 0x76, 0xf0, 0x31, 0xcc, 0xe5, + 0x9c, 0x0d, 0x8d, 0x68, 0x20, 0x4b, 0xf2, 0x74, 0x3d, 0x29, 0x43, 0xcd, 0x9f, 0xb1, 0xd1, 0xc9, + 0x2b, 0x59, 0x9a, 0xa7, 0xeb, 0xa4, 0x8c, 0xc1, 0x9b, 0xce, 0x68, 0xca, 0x06, 0xd1, 0xf4, 0xf5, + 0xea, 0x13, 0x9b, 0xed, 0xd0, 0x19, 0x02, 0xfb, 0xce, 0x2f, 0xf6, 0xd0, 0x34, 0x05, 0x35, 0x89, + 0x30, 0x6d, 0x58, 0xc6, 0xe0, 0x29, 0x21, 0x89, 0x3a, 0x1b, 0xe4, 0x89, 0xa7, 0x21, 0xac, 0x7e, + 0xa6, 0xec, 0xc9, 0x0e, 0x0d, 0x09, 0x6d, 0xc0, 0x5e, 0x36, 0x7c, 0xc9, 0xe6, 0xa2, 0x6d, 0x6b, + 0x2d, 0x05, 0x69, 0x34, 0x7b, 0x75, 0x19, 0x7e, 0x0b, 0xbd, 0xa5, 0x4d, 0x47, 0xc2, 0x48, 0xd8, + 0x1b, 0x05, 0x3f, 0xc2, 0xd7, 0x46, 0xe5, 0x2d, 0xf4, 0x96, 0x6c, 0xdd, 0x67, 0xb0, 0x12, 0x0c, + 0x89, 0x0a, 0xc2, 0x65, 0x92, 0xf2, 0x16, 0xf2, 0x9c, 0x4d, 0x1b, 0x68, 0xd0, 0xf6, 0xdb, 0x9e, + 0xa0, 0xcb, 0x86, 0x61, 0xef, 0x6b, 0xc4, 0x9f, 0xb3, 0x89, 0xd2, 0xdd, 0xf7, 0xd8, 0x1f, 0x85, + 0xfe, 0x7f, 0xc0, 0x5f, 0xb3, 0xa7, 0x57, 0xf2, 0x17, 0x87, 0x24, 0xb2, 0x71, 0xb8, 0xe6, 0x3d, + 0xce, 0x5f, 0xb1, 0xc7, 0xff, 0x0e, 0x46, 0xf3, 0x51, 0x30, 0xef, 0xd0, 0xed, 0xdb, 0x5f, 0xe7, + 0x65, 0xf2, 0xfb, 0xbc, 0x4c, 0xfe, 0x9c, 0x97, 0x09, 0x7b, 0x81, 0xb6, 0x2a, 0x64, 0x8d, 0x4e, + 0x1d, 0xd1, 0x19, 0x65, 0xfb, 0x42, 0x59, 0x6c, 0x3b, 0x34, 0x0a, 0x8a, 0xf8, 0x44, 0xb6, 0xf3, + 0xf8, 0xfb, 0xde, 0x0b, 0x49, 0x68, 0xfb, 0xbf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xc6, 0xbe, 0x10, 0xbe, 0x48, 0x02, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/uuid.pb.go b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/uuid.pb.go index eeb15ee2..7857bff6 100644 --- a/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/uuid.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/cloudfoundry/sonde-go/events/uuid.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: uuid.proto -// DO NOT EDIT! 
package events @@ -49,17 +48,17 @@ func (m *UUID) GetHigh() uint64 { func init() { proto.RegisterType((*UUID)(nil), "events.UUID") } -func (m *UUID) Marshal() (data []byte, err error) { +func (m *UUID) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } -func (m *UUID) MarshalTo(data []byte) (int, error) { +func (m *UUID) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int @@ -67,48 +66,48 @@ func (m *UUID) MarshalTo(data []byte) (int, error) { if m.Low == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("low") } else { - data[i] = 0x8 + dAtA[i] = 0x8 i++ - i = encodeVarintUuid(data, i, uint64(*m.Low)) + i = encodeVarintUuid(dAtA, i, uint64(*m.Low)) } if m.High == nil { return 0, github_com_gogo_protobuf_proto.NewRequiredNotSetError("high") } else { - data[i] = 0x10 + dAtA[i] = 0x10 i++ - i = encodeVarintUuid(data, i, uint64(*m.High)) + i = encodeVarintUuid(dAtA, i, uint64(*m.High)) } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } -func encodeFixed64Uuid(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) +func encodeFixed64Uuid(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) return offset + 8 } -func encodeFixed32Uuid(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) +func encodeFixed32Uuid(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) return offset + 4 } -func encodeVarintUuid(data []byte, offset int, v uint64) int { +func encodeVarintUuid(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) + dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 offset++ } - data[offset] = uint8(v) + dAtA[offset] = uint8(v) return offset + 1 } func (m *UUID) Size() (n int) { @@ -139,9 +138,9 @@ func sovUuid(x uint64) (n int) { func sozUuid(x uint64) (n int) { return sovUuid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } -func (m *UUID) Unmarshal(data []byte) error { +func (m *UUID) Unmarshal(dAtA []byte) error { var hasFields [1]uint64 - l := len(data) + l := len(dAtA) iNdEx := 0 for iNdEx < l { preIndex := iNdEx @@ -153,7 +152,7 @@ func (m *UUID) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -181,7 +180,7 @@ func (m *UUID) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -202,7 +201,7 @@ func (m *UUID) Unmarshal(data []byte) error { if iNdEx >= l { return io.ErrUnexpectedEOF } 
- b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ v |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -213,7 +212,7 @@ func (m *UUID) Unmarshal(data []byte) error { hasFields[0] |= uint64(0x00000002) default: iNdEx = preIndex - skippy, err := skipUuid(data[iNdEx:]) + skippy, err := skipUuid(dAtA[iNdEx:]) if err != nil { return err } @@ -223,7 +222,7 @@ func (m *UUID) Unmarshal(data []byte) error { if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -239,8 +238,8 @@ func (m *UUID) Unmarshal(data []byte) error { } return nil } -func skipUuid(data []byte) (n int, err error) { - l := len(data) +func skipUuid(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -251,7 +250,7 @@ func skipUuid(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -269,7 +268,7 @@ func skipUuid(data []byte) (n int, err error) { return 0, io.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -286,7 +285,7 @@ func skipUuid(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -309,7 +308,7 @@ func skipUuid(data []byte) (n int, err error) { if iNdEx >= l { return 0, io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -320,7 +319,7 @@ func skipUuid(data []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipUuid(data[start:]) + next, err := skipUuid(dAtA[start:]) if err != nil { return 0, err } @@ -347,16 +346,16 @@ var ( func init() { proto.RegisterFile("uuid.proto", fileDescriptorUuid) } var fileDescriptorUuid = []byte{ - // 167 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x2d, 0xcd, 0x4c, + // 171 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0x2d, 0xcd, 0x4c, 0xd1, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x4b, 0x2d, 0x4b, 0xcd, 0x2b, 0x29, 0x96, 0xd2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0x4f, 0xcf, 0xd7, 0x07, 0x4b, 0x27, 0x95, 0xa6, 0x81, 0x79, 0x60, 0x0e, 0x98, 0x05, 0xd1, 0xa6, 0xa4, 0xc3, 0xc5, 0x12, 0x1a, 0xea, 0xe9, 0x22, 0x24, 0xc0, 0xc5, 0x9c, 0x93, 0x5f, 0x2e, 0xc1, 0xa8, 0xc0, 0xa4, 0xc1, 0x12, 0x04, 0x62, 0x0a, 0x09, 0x71, 0xb1, 0x64, 0x64, 0xa6, 0x67, 0x48, 0x30, 0x81, 0x85, - 0xc0, 0x6c, 0x27, 0x9b, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x00, 0xf1, 0x03, 0x20, 0xe6, 0x52, 0xcc, - 0x2f, 0x4a, 0xd7, 0x4b, 0xce, 0xc9, 0x2f, 0x4d, 0x49, 0xcb, 0x2f, 0xcd, 0x4b, 0x29, 0xaa, 0xd4, - 0x4b, 0x29, 0xca, 0x2f, 0x28, 0xce, 0xcf, 0x4b, 0x49, 0xd5, 0x83, 0xb8, 0xc6, 0x89, 0x3b, 0x14, - 0xe8, 0x42, 0xb7, 0xc4, 0xe4, 0x92, 0xfc, 0xa2, 0x4a, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0xc1, - 0x49, 0x67, 0x8e, 0xaf, 0x00, 0x00, 0x00, + 0xc0, 0x6c, 0x27, 0x9b, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, + 0x91, 0x4b, 0x31, 0xbf, 0x28, 0x5d, 0x2f, 0x39, 0x27, 0xbf, 0x34, 0x25, 0x2d, 0xbf, 0x34, 0x2f, + 0xa5, 0xa8, 0x52, 0x2f, 0xa5, 0x28, 0xbf, 0xa0, 0x38, 0x3f, 0x2f, 0x25, 0x55, 0x0f, 0xe2, 0x1a, + 0x27, 0xee, 
0xd0, 0xd2, 0xcc, 0x14, 0xb7, 0xc4, 0xe4, 0x92, 0xfc, 0xa2, 0x4a, 0x40, 0x00, 0x00,
+	0x00, 0xff, 0xff, 0xc1, 0x49, 0x67, 0x8e, 0xaf, 0x00, 0x00, 0x00,
 }
diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/Makefile
index 395592a5..02f9c62c 100644
--- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/Makefile
+++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/Makefile
@@ -27,6 +27,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 regenerate:
+	go install github.com/gogo/protobuf/protoc-gen-gogo
 	protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. --proto_path=../../../../:../protobuf/:. *.proto
 
 restore:
diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/doc.go
index 5ecfae11..147b5ecc 100644
--- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/doc.go
+++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/doc.go
@@ -148,6 +148,7 @@ The enumprefix, getters and stringer extensions can be used to remove some of th
 - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method.
 - goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension
 - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields.
+ - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway).
 
 Less Typing and Peace of Mind is explained in their specific plugin folders godoc:
 
diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
index ff355b9b..fa88040f 100644
--- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
+++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go
@@ -1,6 +1,5 @@
-// Code generated by protoc-gen-gogo.
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
 // source: gogo.proto
-// DO NOT EDIT!
 
 /*
 Package gogoproto is a generated protocol buffer package.
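
For reference, the dAtA rename running through the regenerated *.pb.go files above is purely mechanical: dAtA is the wire buffer, and the encodeVarint*/skip* helpers that take it read and write standard protobuf base-128 varints. The standalone Go sketch below (not part of this patch; the package main and helper name encodeVarint are hypothetical) mirrors the encodeVarintLog/encodeVarintMetric/encodeVarintUuid bodies shown in the diff.

package main

import "fmt"

// encodeVarint mirrors the encodeVarintLog/encodeVarintMetric/encodeVarintUuid
// helpers from the regenerated files: it writes v into dAtA starting at offset
// as a protobuf base-128 varint (7 payload bits per byte, continuation bit 0x80
// on every byte except the last) and returns the offset just past the value.
func encodeVarint(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dAtA[offset] = uint8(v)
	return offset + 1
}

func main() {
	buf := make([]byte, 10) // 10 bytes is enough for any uint64 varint
	n := encodeVarint(buf, 0, 300)
	fmt.Printf("%x\n", buf[:n]) // prints "ac02": 300 encoded as a varint
}

Decoding reverses this, which is what the skipLog/skipMetric/skipUuid loops above do when they accumulate (uint64(b) & 0x7F) << shift until they hit a byte without the 0x80 continuation bit.
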
@@ -34,6 +33,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ Field: 62001, Name: "gogoproto.goproto_enum_prefix", Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", + Filename: "gogo.proto", } var E_GoprotoEnumStringer = &proto.ExtensionDesc{ @@ -42,6 +42,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{ Field: 62021, Name: "gogoproto.goproto_enum_stringer", Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", + Filename: "gogo.proto", } var E_EnumStringer = &proto.ExtensionDesc{ @@ -50,6 +51,7 @@ var E_EnumStringer = &proto.ExtensionDesc{ Field: 62022, Name: "gogoproto.enum_stringer", Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", + Filename: "gogo.proto", } var E_EnumCustomname = &proto.ExtensionDesc{ @@ -58,6 +60,16 @@ var E_EnumCustomname = &proto.ExtensionDesc{ Field: 62023, Name: "gogoproto.enum_customname", Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", } var E_EnumvalueCustomname = &proto.ExtensionDesc{ @@ -66,6 +78,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{ Field: 66001, Name: "gogoproto.enumvalue_customname", Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", + Filename: "gogo.proto", } var E_GoprotoGettersAll = &proto.ExtensionDesc{ @@ -74,6 +87,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{ Field: 63001, Name: "gogoproto.goproto_getters_all", Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", + Filename: "gogo.proto", } var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ @@ -82,6 +96,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ Field: 63002, Name: "gogoproto.goproto_enum_prefix_all", Tag: "varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", + Filename: "gogo.proto", } var E_GoprotoStringerAll = &proto.ExtensionDesc{ @@ -90,6 +105,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{ Field: 63003, Name: "gogoproto.goproto_stringer_all", Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", + Filename: "gogo.proto", } var E_VerboseEqualAll = &proto.ExtensionDesc{ @@ -98,6 +114,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{ Field: 63004, Name: "gogoproto.verbose_equal_all", Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", + Filename: "gogo.proto", } var E_FaceAll = &proto.ExtensionDesc{ @@ -106,6 +123,7 @@ var E_FaceAll = &proto.ExtensionDesc{ Field: 63005, Name: "gogoproto.face_all", Tag: "varint,63005,opt,name=face_all,json=faceAll", + Filename: "gogo.proto", } var E_GostringAll = &proto.ExtensionDesc{ @@ -114,6 +132,7 @@ var E_GostringAll = &proto.ExtensionDesc{ Field: 63006, Name: "gogoproto.gostring_all", Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", + Filename: "gogo.proto", } var E_PopulateAll = &proto.ExtensionDesc{ @@ -122,6 +141,7 @@ var E_PopulateAll = &proto.ExtensionDesc{ Field: 63007, Name: "gogoproto.populate_all", Tag: "varint,63007,opt,name=populate_all,json=populateAll", + Filename: "gogo.proto", } var E_StringerAll = &proto.ExtensionDesc{ @@ -130,6 +150,7 @@ var E_StringerAll = &proto.ExtensionDesc{ Field: 63008, Name: "gogoproto.stringer_all", Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", + Filename: "gogo.proto", } var E_OnlyoneAll = 
&proto.ExtensionDesc{ @@ -138,6 +159,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{ Field: 63009, Name: "gogoproto.onlyone_all", Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", + Filename: "gogo.proto", } var E_EqualAll = &proto.ExtensionDesc{ @@ -146,6 +168,7 @@ var E_EqualAll = &proto.ExtensionDesc{ Field: 63013, Name: "gogoproto.equal_all", Tag: "varint,63013,opt,name=equal_all,json=equalAll", + Filename: "gogo.proto", } var E_DescriptionAll = &proto.ExtensionDesc{ @@ -154,6 +177,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{ Field: 63014, Name: "gogoproto.description_all", Tag: "varint,63014,opt,name=description_all,json=descriptionAll", + Filename: "gogo.proto", } var E_TestgenAll = &proto.ExtensionDesc{ @@ -162,6 +186,7 @@ var E_TestgenAll = &proto.ExtensionDesc{ Field: 63015, Name: "gogoproto.testgen_all", Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", + Filename: "gogo.proto", } var E_BenchgenAll = &proto.ExtensionDesc{ @@ -170,6 +195,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{ Field: 63016, Name: "gogoproto.benchgen_all", Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", + Filename: "gogo.proto", } var E_MarshalerAll = &proto.ExtensionDesc{ @@ -178,6 +204,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{ Field: 63017, Name: "gogoproto.marshaler_all", Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", + Filename: "gogo.proto", } var E_UnmarshalerAll = &proto.ExtensionDesc{ @@ -186,6 +213,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{ Field: 63018, Name: "gogoproto.unmarshaler_all", Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", + Filename: "gogo.proto", } var E_StableMarshalerAll = &proto.ExtensionDesc{ @@ -194,6 +222,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{ Field: 63019, Name: "gogoproto.stable_marshaler_all", Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", + Filename: "gogo.proto", } var E_SizerAll = &proto.ExtensionDesc{ @@ -202,6 +231,7 @@ var E_SizerAll = &proto.ExtensionDesc{ Field: 63020, Name: "gogoproto.sizer_all", Tag: "varint,63020,opt,name=sizer_all,json=sizerAll", + Filename: "gogo.proto", } var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ @@ -210,6 +240,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ Field: 63021, Name: "gogoproto.goproto_enum_stringer_all", Tag: "varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", + Filename: "gogo.proto", } var E_EnumStringerAll = &proto.ExtensionDesc{ @@ -218,6 +249,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{ Field: 63022, Name: "gogoproto.enum_stringer_all", Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", + Filename: "gogo.proto", } var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ @@ -226,6 +258,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ Field: 63023, Name: "gogoproto.unsafe_marshaler_all", Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", + Filename: "gogo.proto", } var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ @@ -234,6 +267,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ Field: 63024, Name: "gogoproto.unsafe_unmarshaler_all", Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", + Filename: "gogo.proto", } var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ @@ -242,6 +276,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ Field: 63025, Name: "gogoproto.goproto_extensions_map_all", Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", + 
Filename: "gogo.proto", } var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ @@ -250,6 +285,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ Field: 63026, Name: "gogoproto.goproto_unrecognized_all", Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", + Filename: "gogo.proto", } var E_GogoprotoImport = &proto.ExtensionDesc{ @@ -258,6 +294,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{ Field: 63027, Name: "gogoproto.gogoproto_import", Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", + Filename: "gogo.proto", } var E_ProtosizerAll = &proto.ExtensionDesc{ @@ -266,6 +303,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{ Field: 63028, Name: "gogoproto.protosizer_all", Tag: "varint,63028,opt,name=protosizer_all,json=protosizerAll", + Filename: "gogo.proto", } var E_CompareAll = &proto.ExtensionDesc{ @@ -274,6 +312,34 @@ var E_CompareAll = &proto.ExtensionDesc{ Field: 63029, Name: "gogoproto.compare_all", Tag: "varint,63029,opt,name=compare_all,json=compareAll", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration", + Filename: "gogo.proto", } var E_GoprotoGetters = &proto.ExtensionDesc{ @@ -282,6 +348,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{ Field: 64001, Name: "gogoproto.goproto_getters", Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", + Filename: "gogo.proto", } var E_GoprotoStringer = &proto.ExtensionDesc{ @@ -290,6 +357,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{ Field: 64003, Name: "gogoproto.goproto_stringer", Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", + Filename: "gogo.proto", } var E_VerboseEqual = &proto.ExtensionDesc{ @@ -298,6 +366,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{ Field: 64004, Name: "gogoproto.verbose_equal", Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", + Filename: "gogo.proto", } var E_Face = &proto.ExtensionDesc{ @@ -306,6 +375,7 @@ var E_Face = &proto.ExtensionDesc{ Field: 64005, Name: "gogoproto.face", Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", } var E_Gostring = &proto.ExtensionDesc{ @@ -314,6 +384,7 @@ var E_Gostring = &proto.ExtensionDesc{ Field: 64006, Name: "gogoproto.gostring", Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", } var E_Populate = &proto.ExtensionDesc{ @@ -322,6 +393,7 @@ var E_Populate = &proto.ExtensionDesc{ Field: 64007, Name: "gogoproto.populate", Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", } var E_Stringer = &proto.ExtensionDesc{ @@ -330,6 +402,7 @@ var E_Stringer = &proto.ExtensionDesc{ Field: 67008, Name: "gogoproto.stringer", Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", } var E_Onlyone = &proto.ExtensionDesc{ @@ -338,6 +411,7 @@ var E_Onlyone = 
&proto.ExtensionDesc{ Field: 64009, Name: "gogoproto.onlyone", Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", } var E_Equal = &proto.ExtensionDesc{ @@ -346,6 +420,7 @@ var E_Equal = &proto.ExtensionDesc{ Field: 64013, Name: "gogoproto.equal", Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", } var E_Description = &proto.ExtensionDesc{ @@ -354,6 +429,7 @@ var E_Description = &proto.ExtensionDesc{ Field: 64014, Name: "gogoproto.description", Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", } var E_Testgen = &proto.ExtensionDesc{ @@ -362,6 +438,7 @@ var E_Testgen = &proto.ExtensionDesc{ Field: 64015, Name: "gogoproto.testgen", Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", } var E_Benchgen = &proto.ExtensionDesc{ @@ -370,6 +447,7 @@ var E_Benchgen = &proto.ExtensionDesc{ Field: 64016, Name: "gogoproto.benchgen", Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", } var E_Marshaler = &proto.ExtensionDesc{ @@ -378,6 +456,7 @@ var E_Marshaler = &proto.ExtensionDesc{ Field: 64017, Name: "gogoproto.marshaler", Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", } var E_Unmarshaler = &proto.ExtensionDesc{ @@ -386,6 +465,7 @@ var E_Unmarshaler = &proto.ExtensionDesc{ Field: 64018, Name: "gogoproto.unmarshaler", Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", } var E_StableMarshaler = &proto.ExtensionDesc{ @@ -394,6 +474,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{ Field: 64019, Name: "gogoproto.stable_marshaler", Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", + Filename: "gogo.proto", } var E_Sizer = &proto.ExtensionDesc{ @@ -402,6 +483,7 @@ var E_Sizer = &proto.ExtensionDesc{ Field: 64020, Name: "gogoproto.sizer", Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", } var E_UnsafeMarshaler = &proto.ExtensionDesc{ @@ -410,6 +492,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{ Field: 64023, Name: "gogoproto.unsafe_marshaler", Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", + Filename: "gogo.proto", } var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ @@ -418,6 +501,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ Field: 64024, Name: "gogoproto.unsafe_unmarshaler", Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", + Filename: "gogo.proto", } var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ @@ -426,6 +510,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ Field: 64025, Name: "gogoproto.goproto_extensions_map", Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", + Filename: "gogo.proto", } var E_GoprotoUnrecognized = &proto.ExtensionDesc{ @@ -434,6 +519,7 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{ Field: 64026, Name: "gogoproto.goproto_unrecognized", Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", + Filename: "gogo.proto", } var E_Protosizer = &proto.ExtensionDesc{ @@ -442,6 +528,7 @@ var E_Protosizer = &proto.ExtensionDesc{ Field: 64028, Name: "gogoproto.protosizer", Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", } var E_Compare = &proto.ExtensionDesc{ @@ -450,6 +537,16 @@ var E_Compare = &proto.ExtensionDesc{ Field: 64029, Name: "gogoproto.compare", Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: 
"varint,64030,opt,name=typedecl", + Filename: "gogo.proto", } var E_Nullable = &proto.ExtensionDesc{ @@ -458,6 +555,7 @@ var E_Nullable = &proto.ExtensionDesc{ Field: 65001, Name: "gogoproto.nullable", Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", } var E_Embed = &proto.ExtensionDesc{ @@ -466,6 +564,7 @@ var E_Embed = &proto.ExtensionDesc{ Field: 65002, Name: "gogoproto.embed", Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", } var E_Customtype = &proto.ExtensionDesc{ @@ -474,6 +573,7 @@ var E_Customtype = &proto.ExtensionDesc{ Field: 65003, Name: "gogoproto.customtype", Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", } var E_Customname = &proto.ExtensionDesc{ @@ -482,6 +582,7 @@ var E_Customname = &proto.ExtensionDesc{ Field: 65004, Name: "gogoproto.customname", Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", } var E_Jsontag = &proto.ExtensionDesc{ @@ -490,6 +591,7 @@ var E_Jsontag = &proto.ExtensionDesc{ Field: 65005, Name: "gogoproto.jsontag", Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", } var E_Moretags = &proto.ExtensionDesc{ @@ -498,6 +600,7 @@ var E_Moretags = &proto.ExtensionDesc{ Field: 65006, Name: "gogoproto.moretags", Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", } var E_Casttype = &proto.ExtensionDesc{ @@ -506,6 +609,7 @@ var E_Casttype = &proto.ExtensionDesc{ Field: 65007, Name: "gogoproto.casttype", Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", } var E_Castkey = &proto.ExtensionDesc{ @@ -514,6 +618,7 @@ var E_Castkey = &proto.ExtensionDesc{ Field: 65008, Name: "gogoproto.castkey", Tag: "bytes,65008,opt,name=castkey", + Filename: "gogo.proto", } var E_Castvalue = &proto.ExtensionDesc{ @@ -522,6 +627,25 @@ var E_Castvalue = &proto.ExtensionDesc{ Field: 65009, Name: "gogoproto.castvalue", Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", } func init() { @@ -529,6 +653,7 @@ func init() { proto.RegisterExtension(E_GoprotoEnumStringer) proto.RegisterExtension(E_EnumStringer) proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) proto.RegisterExtension(E_EnumvalueCustomname) proto.RegisterExtension(E_GoprotoGettersAll) proto.RegisterExtension(E_GoprotoEnumPrefixAll) @@ -556,6 +681,9 @@ func init() { proto.RegisterExtension(E_GogoprotoImport) proto.RegisterExtension(E_ProtosizerAll) proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) proto.RegisterExtension(E_GoprotoGetters) proto.RegisterExtension(E_GoprotoStringer) proto.RegisterExtension(E_VerboseEqual) @@ -578,6 +706,7 @@ func init() { proto.RegisterExtension(E_GoprotoUnrecognized) proto.RegisterExtension(E_Protosizer) proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) proto.RegisterExtension(E_Nullable) proto.RegisterExtension(E_Embed) proto.RegisterExtension(E_Customtype) @@ -587,79 +716,88 @@ func init() { proto.RegisterExtension(E_Casttype) 
proto.RegisterExtension(E_Castkey) proto.RegisterExtension(E_Castvalue) + proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) } func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } var fileDescriptorGogo = []byte{ - // 1096 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xcb, 0x6f, 0xdc, 0x54, - 0x14, 0x87, 0x85, 0x48, 0x95, 0x99, 0x93, 0x17, 0x99, 0x84, 0x50, 0x2a, 0x10, 0xed, 0x8e, 0x55, - 0xba, 0x42, 0xa8, 0xae, 0x10, 0x6a, 0xab, 0x34, 0x2a, 0x22, 0x10, 0x05, 0x52, 0x40, 0x2c, 0x46, - 0x9e, 0xc9, 0x8d, 0x3b, 0xe0, 0xf1, 0x35, 0xbe, 0x76, 0xd5, 0xb0, 0x43, 0xe5, 0x21, 0x84, 0x78, - 0x23, 0x41, 0x4b, 0xcb, 0x63, 0xc1, 0xfb, 0x59, 0x1e, 0x7b, 0x36, 0xc0, 0x9a, 0xff, 0x81, 0x0d, - 0x10, 0x5e, 0x52, 0x76, 0xd9, 0xf4, 0x1e, 0xfb, 0x1c, 0xcf, 0xb5, 0x67, 0xa4, 0x7b, 0x67, 0xe7, - 0x64, 0xee, 0xf7, 0xcd, 0xf5, 0x39, 0xbe, 0xe7, 0x37, 0x06, 0x08, 0x64, 0x20, 0x97, 0xe3, 0x44, - 0xa6, 0xb2, 0xd5, 0xc4, 0xeb, 0xfc, 0xf2, 0xd0, 0xe1, 0x40, 0xca, 0x20, 0x14, 0x47, 0xf3, 0xbf, - 0x3a, 0xd9, 0xf6, 0xd1, 0x2d, 0xa1, 0xba, 0x49, 0x2f, 0x4e, 0x65, 0x52, 0x2c, 0xf6, 0x1e, 0x80, - 0x05, 0x5a, 0xdc, 0x16, 0x51, 0xd6, 0x6f, 0xc7, 0x89, 0xd8, 0xee, 0x5d, 0x68, 0xdd, 0xb6, 0x5c, - 0x90, 0xcb, 0x4c, 0x2e, 0xaf, 0xe8, 0x4f, 0x1f, 0x8c, 0xd3, 0x9e, 0x8c, 0xd4, 0xc1, 0x6b, 0xbf, - 0xdf, 0x78, 0xf8, 0x86, 0x3b, 0x1b, 0x1b, 0xf3, 0x84, 0xe2, 0x67, 0xeb, 0x39, 0xe8, 0x6d, 0xc0, - 0xcd, 0x15, 0x9f, 0x4a, 0x93, 0x5e, 0x14, 0x88, 0xc4, 0x62, 0xfc, 0x99, 0x8c, 0x0b, 0x86, 0xf1, - 0x21, 0x42, 0xbd, 0x53, 0x30, 0x33, 0x8e, 0xeb, 0x17, 0x72, 0x4d, 0x0b, 0x53, 0xb2, 0x0a, 0x73, - 0xb9, 0xa4, 0x9b, 0xa9, 0x54, 0xf6, 0x23, 0xbf, 0x2f, 0x2c, 0x9a, 0x5f, 0x73, 0x4d, 0x73, 0x63, - 0x16, 0xb1, 0x53, 0x25, 0xe5, 0x9d, 0x85, 0x45, 0xfc, 0xcf, 0x79, 0x3f, 0xcc, 0x84, 0x69, 0x3b, - 0x32, 0xd2, 0x76, 0x16, 0x97, 0xb1, 0xf2, 0xb7, 0x8b, 0x13, 0xb9, 0x72, 0xa1, 0x14, 0x18, 0x5e, - 0xa3, 0x13, 0x81, 0x48, 0x53, 0x91, 0xa8, 0xb6, 0x1f, 0x86, 0x23, 0x36, 0x79, 0xba, 0x17, 0x96, - 0xc6, 0x4b, 0xbb, 0xd5, 0x4e, 0xac, 0x16, 0xe4, 0x89, 0x30, 0xf4, 0x36, 0xe1, 0x96, 0x11, 0x9d, - 0x75, 0x70, 0x5e, 0x26, 0xe7, 0xe2, 0x50, 0x77, 0x51, 0xbb, 0x0e, 0xfc, 0xff, 0xb2, 0x1f, 0x0e, - 0xce, 0x77, 0xc9, 0xd9, 0x22, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x60, 0xfe, 0xbc, 0x48, 0x3a, 0x52, - 0x89, 0xb6, 0x78, 0x2a, 0xf3, 0x43, 0x07, 0xdd, 0x15, 0xd2, 0xcd, 0x11, 0xb8, 0x82, 0x1c, 0xba, - 0x8e, 0x41, 0x63, 0xdb, 0xef, 0x0a, 0x07, 0xc5, 0x55, 0x52, 0x4c, 0xe2, 0x7a, 0x44, 0x4f, 0xc0, - 0x74, 0x20, 0x8b, 0x5b, 0x72, 0xc0, 0xdf, 0x23, 0x7c, 0x8a, 0x19, 0x52, 0xc4, 0x32, 0xce, 0x42, - 0x3f, 0x75, 0xd9, 0xc1, 0xfb, 0xac, 0x60, 0x86, 0x14, 0x63, 0x94, 0xf5, 0x03, 0x56, 0x28, 0xa3, - 0x9e, 0xf7, 0xc2, 0x94, 0x8c, 0xc2, 0x1d, 0x19, 0xb9, 0x6c, 0xe2, 0x43, 0x32, 0x00, 0x21, 0x28, - 0x38, 0x0e, 0x4d, 0xd7, 0x46, 0x7c, 0x44, 0x78, 0x43, 0x70, 0x07, 0xf4, 0x39, 0xe3, 0x21, 0xa3, - 0x57, 0x38, 0x28, 0x3e, 0x26, 0xc5, 0xac, 0x81, 0xd1, 0x6d, 0xa4, 0x42, 0xa5, 0x81, 0x70, 0x91, - 0x7c, 0xc2, 0xb7, 0x41, 0x08, 0x95, 0xb2, 0x23, 0xa2, 0xee, 0x39, 0x37, 0xc3, 0xa7, 0x5c, 0x4a, - 0x66, 0x50, 0xa1, 0x27, 0x4f, 0xdf, 0x4f, 0xd4, 0x39, 0x3f, 0x74, 0x6a, 0xc7, 0x67, 0xe4, 0x98, - 0x2e, 0x21, 0xaa, 0x48, 0x16, 0x8d, 0xa3, 0xf9, 0x9c, 0x2b, 0x62, 0x60, 0x74, 0xf4, 0x54, 0xea, - 0x77, 0x42, 0xd1, 0x1e, 0xc7, 0xf6, 0x05, 0x1f, 0xbd, 0x82, 0x5d, 0x33, 0x8d, 0xba, 0xd3, 0xaa, - 0xf7, 0xb4, 0x93, 0xe6, 0x4b, 0xee, 0x74, 0x0e, 0x20, 0xfc, 0x18, 0xdc, 0x3a, 0x72, 0xd4, 0x3b, - 0xc8, 0xbe, 
0x22, 0xd9, 0xd2, 0x88, 0x71, 0x4f, 0x23, 0x61, 0x5c, 0xe5, 0xd7, 0x3c, 0x12, 0x44, - 0xcd, 0xa5, 0xab, 0x96, 0x45, 0xca, 0xdf, 0x1e, 0xaf, 0x6a, 0xdf, 0x70, 0xd5, 0x0a, 0xb6, 0x52, - 0xb5, 0x87, 0x61, 0x89, 0x8c, 0xe3, 0xf5, 0xf5, 0x5b, 0x1e, 0xac, 0x05, 0xbd, 0x59, 0xed, 0xee, - 0xe3, 0x70, 0xa8, 0x2c, 0xe7, 0x85, 0x54, 0x44, 0x0a, 0x19, 0xbd, 0xe7, 0xd8, 0xc1, 0x7c, 0x8d, - 0xcc, 0x3c, 0xf1, 0x57, 0x4a, 0xc1, 0x9a, 0x1f, 0xa3, 0xfc, 0x51, 0x38, 0xc8, 0xf2, 0x2c, 0x4a, - 0x44, 0x57, 0x06, 0x91, 0x6e, 0xe3, 0x96, 0x83, 0xfa, 0xbb, 0x5a, 0xab, 0x36, 0x0d, 0x1c, 0xcd, - 0x67, 0xe0, 0xa6, 0xf2, 0xf7, 0x46, 0xbb, 0xd7, 0x8f, 0x65, 0x92, 0x5a, 0x8c, 0xdf, 0x73, 0xa7, - 0x4a, 0xee, 0x4c, 0x8e, 0x79, 0x2b, 0x30, 0x9b, 0xff, 0xe9, 0xfa, 0x48, 0xfe, 0x40, 0xa2, 0x99, - 0x01, 0x45, 0x83, 0xa3, 0x2b, 0xfb, 0xb1, 0x9f, 0xb8, 0xcc, 0xbf, 0x1f, 0x79, 0x70, 0x10, 0x52, - 0x3c, 0x7d, 0x73, 0xb5, 0x24, 0x6e, 0xdd, 0x31, 0x24, 0x59, 0x13, 0x4a, 0xf9, 0x41, 0xe9, 0x79, - 0x66, 0x8f, 0xce, 0x6c, 0x35, 0x88, 0xbd, 0xfb, 0xb1, 0x3c, 0xd5, 0xb8, 0xb4, 0xcb, 0x2e, 0xee, - 0x95, 0x15, 0xaa, 0xa4, 0xa5, 0x77, 0x1a, 0x66, 0x2a, 0x51, 0x69, 0x57, 0x3d, 0x4b, 0xaa, 0x69, - 0x33, 0x29, 0xbd, 0xbb, 0x60, 0x02, 0x63, 0xcf, 0x8e, 0x3f, 0x47, 0x78, 0xbe, 0xdc, 0xbb, 0x07, - 0x1a, 0x1c, 0x77, 0x76, 0xf4, 0x79, 0x42, 0x4b, 0x04, 0x71, 0x8e, 0x3a, 0x3b, 0xfe, 0x02, 0xe3, - 0x8c, 0x20, 0xee, 0x5e, 0xc2, 0x9f, 0x5e, 0x9a, 0xa0, 0x71, 0xc5, 0xb5, 0x3b, 0x0e, 0x93, 0x94, - 0x71, 0x76, 0xfa, 0x45, 0xfa, 0x72, 0x26, 0xbc, 0xbb, 0xe1, 0x80, 0x63, 0xc1, 0x5f, 0x26, 0xb4, - 0x58, 0xaf, 0x13, 0x64, 0xca, 0xc8, 0x35, 0x3b, 0xfe, 0x0a, 0xe1, 0x26, 0x85, 0x5b, 0xa7, 0x5c, - 0xb3, 0x0b, 0x5e, 0xe5, 0xad, 0x13, 0x81, 0x65, 0xe3, 0x48, 0xb3, 0xd3, 0xaf, 0x71, 0xd5, 0x19, - 0xd1, 0xa7, 0xa9, 0x59, 0x8e, 0x29, 0x3b, 0xff, 0x3a, 0xf1, 0x03, 0x06, 0x2b, 0x60, 0x8c, 0x49, - 0xbb, 0xe2, 0x0d, 0xae, 0x80, 0x41, 0xe1, 0x31, 0xaa, 0x47, 0x9f, 0xdd, 0xf4, 0x26, 0x1f, 0xa3, - 0x5a, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc5, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0xea, - 0x59, 0x62, 0x77, 0xbc, 0xcd, 0xdb, 0xa8, 0x45, 0x89, 0x4e, 0xa6, 0xd6, 0x70, 0x8e, 0xd8, 0x7d, - 0xef, 0x90, 0x6f, 0x7e, 0x28, 0x46, 0xbc, 0x47, 0x60, 0x69, 0x74, 0x86, 0xd8, 0xad, 0x97, 0xf6, - 0x6a, 0xbf, 0xfa, 0xcd, 0x08, 0xd1, 0x91, 0xb7, 0x38, 0x2a, 0x3f, 0xec, 0xda, 0xcb, 0x7b, 0xd5, - 0x17, 0x3b, 0x33, 0x3e, 0xf4, 0x2f, 0x34, 0x18, 0x8c, 0x6e, 0xbb, 0xeb, 0x0a, 0xb9, 0x0c, 0x08, - 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x95, 0x8f, 0x06, 0x11, 0x1a, 0x6e, 0x44, 0x59, 0x18, 0xe2, - 0xc3, 0xd1, 0xba, 0x7d, 0x44, 0x4c, 0x88, 0x70, 0x8b, 0xd9, 0x3f, 0xf6, 0xe9, 0x60, 0x30, 0xa0, - 0x67, 0xe8, 0x01, 0xd1, 0xef, 0xe8, 0x1a, 0x58, 0xc8, 0x3f, 0xf7, 0x79, 0x20, 0xe0, 0x6a, 0x7d, - 0x9e, 0xa0, 0x78, 0x69, 0x4c, 0x77, 0x62, 0xeb, 0xb7, 0xfe, 0xb5, 0x5f, 0xbc, 0x83, 0x1a, 0xc8, - 0x40, 0x90, 0xbf, 0x75, 0x5a, 0x04, 0xbb, 0x55, 0x41, 0xfe, 0xa2, 0x79, 0x0c, 0x26, 0x9f, 0x50, - 0x32, 0x4a, 0xfd, 0xc0, 0x46, 0xff, 0x4d, 0x34, 0xaf, 0xc7, 0x82, 0xf5, 0x65, 0x22, 0xf4, 0xa5, - 0xb2, 0xb1, 0xff, 0x10, 0x5b, 0x02, 0x08, 0x77, 0x7d, 0x95, 0xba, 0xdc, 0xf7, 0xbf, 0x0c, 0x33, - 0x80, 0x9b, 0xc6, 0xeb, 0x27, 0xc5, 0x8e, 0x8d, 0xfd, 0x8f, 0x37, 0x4d, 0xeb, 0xf5, 0x00, 0x6c, - 0xe2, 0x65, 0xfe, 0xbe, 0x6d, 0x83, 0xff, 0x27, 0x78, 0x40, 0x9c, 0x3c, 0x02, 0x0b, 0xfa, 0x79, - 0xa9, 0x63, 0x27, 0x61, 0x55, 0xae, 0xca, 0xf5, 0xfc, 0x41, 0xbc, 0x1e, 0x00, 0x00, 0xff, 0xff, - 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00, + // 1201 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0xcb, 0x6f, 0x1c, 0x45, + 0x13, 0xc0, 0xf5, 0xe9, 0x73, 0x64, 0x6f, 0xf9, 0x85, 0xd7, 0xc6, 0x84, 0x08, 0x44, 0x72, 0xe3, + 0xe4, 0x9c, 0x22, 0x94, 0xb6, 0x22, 0xcb, 0xb1, 0x1c, 0x2b, 0x11, 0x06, 0x63, 0xe2, 0x00, 0xe2, + 0xb0, 0x9a, 0xdd, 0x6d, 0x4f, 0x06, 0x66, 0xa6, 0x87, 0x99, 0x9e, 0x28, 0xce, 0x0d, 0x85, 0x87, + 0x10, 0xe2, 0x8d, 0x04, 0x09, 0x49, 0x80, 0x03, 0xef, 0x67, 0x78, 0x1f, 0xb9, 0xf0, 0xb8, 0xf2, + 0x3f, 0x70, 0x01, 0xcc, 0xdb, 0x37, 0x5f, 0x50, 0xcd, 0x56, 0xcd, 0xf6, 0xac, 0x57, 0xea, 0xde, + 0xdb, 0xec, 0xba, 0x7f, 0xbf, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x31, 0x80, 0xaf, 0x7c, 0x35, 0x97, + 0xa4, 0x4a, 0xab, 0x7a, 0x0d, 0xaf, 0x8b, 0xcb, 0x03, 0x07, 0x7d, 0xa5, 0xfc, 0x50, 0x1e, 0x2e, + 0x3e, 0x35, 0xf3, 0xcd, 0xc3, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, + 0xc1, 0x34, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0x37, 0x83, 0xf3, 0xf5, 0x5b, 0xe6, + 0x3a, 0xe4, 0x1c, 0x93, 0x73, 0xcb, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0xfe, 0xeb, + 0x3f, 0xff, 0xff, 0xe0, 0xff, 0x6e, 0x1f, 0x59, 0x9f, 0x22, 0x14, 0xff, 0xb6, 0x56, 0x80, 0x62, + 0x1d, 0x6e, 0xac, 0xf8, 0x32, 0x9d, 0x06, 0xb1, 0x2f, 0x53, 0x8b, 0xf1, 0x3b, 0x32, 0x4e, 0x1b, + 0xc6, 0x7b, 0x09, 0x15, 0x4b, 0x30, 0x3e, 0x88, 0xeb, 0x7b, 0x72, 0x8d, 0x49, 0x53, 0xb2, 0x02, + 0x93, 0x85, 0xa4, 0x95, 0x67, 0x5a, 0x45, 0xb1, 0x17, 0x49, 0x8b, 0xe6, 0x87, 0x42, 0x53, 0x5b, + 0x9f, 0x40, 0x6c, 0xa9, 0xa4, 0x84, 0x80, 0x11, 0xfc, 0xa6, 0x2d, 0x5b, 0xa1, 0xc5, 0xf0, 0x23, + 0x05, 0x52, 0xae, 0x17, 0x67, 0x60, 0x06, 0xaf, 0xcf, 0x79, 0x61, 0x2e, 0xcd, 0x48, 0x0e, 0xf5, + 0xf5, 0x9c, 0xc1, 0x65, 0x2c, 0xfb, 0xe9, 0xe2, 0x50, 0x11, 0xce, 0x74, 0x29, 0x30, 0x62, 0x32, + 0xaa, 0xe8, 0x4b, 0xad, 0x65, 0x9a, 0x35, 0xbc, 0xb0, 0x5f, 0x78, 0x27, 0x82, 0xb0, 0x34, 0x5e, + 0xda, 0xae, 0x56, 0x71, 0xa5, 0x43, 0x2e, 0x86, 0xa1, 0xd8, 0x80, 0x9b, 0xfa, 0x3c, 0x15, 0x0e, + 0xce, 0xcb, 0xe4, 0x9c, 0xd9, 0xf3, 0x64, 0xa0, 0x76, 0x0d, 0xf8, 0xfb, 0xb2, 0x96, 0x0e, 0xce, + 0xd7, 0xc8, 0x59, 0x27, 0x96, 0x4b, 0x8a, 0xc6, 0x53, 0x30, 0x75, 0x4e, 0xa6, 0x4d, 0x95, 0xc9, + 0x86, 0x7c, 0x24, 0xf7, 0x42, 0x07, 0xdd, 0x15, 0xd2, 0x4d, 0x12, 0xb8, 0x8c, 0x1c, 0xba, 0x8e, + 0xc2, 0xc8, 0xa6, 0xd7, 0x92, 0x0e, 0x8a, 0xab, 0xa4, 0x18, 0xc6, 0xf5, 0x88, 0x2e, 0xc2, 0x98, + 0xaf, 0x3a, 0xb7, 0xe4, 0x80, 0x5f, 0x23, 0x7c, 0x94, 0x19, 0x52, 0x24, 0x2a, 0xc9, 0x43, 0x4f, + 0xbb, 0x44, 0xf0, 0x3a, 0x2b, 0x98, 0x21, 0xc5, 0x00, 0x69, 0x7d, 0x83, 0x15, 0x99, 0x91, 0xcf, + 0x05, 0x18, 0x55, 0x71, 0xb8, 0xa5, 0x62, 0x97, 0x20, 0xde, 0x24, 0x03, 0x10, 0x82, 0x82, 0x79, + 0xa8, 0xb9, 0x16, 0xe2, 0xad, 0x6d, 0xde, 0x1e, 0x5c, 0x81, 0x15, 0x98, 0xe4, 0x06, 0x15, 0xa8, + 0xd8, 0x41, 0xf1, 0x36, 0x29, 0x26, 0x0c, 0x8c, 0x6e, 0x43, 0xcb, 0x4c, 0xfb, 0xd2, 0x45, 0xf2, + 0x0e, 0xdf, 0x06, 0x21, 0x94, 0xca, 0xa6, 0x8c, 0x5b, 0x67, 0xdd, 0x0c, 0xef, 0x72, 0x2a, 0x99, + 0x41, 0xc5, 0x12, 0x8c, 0x47, 0x5e, 0x9a, 0x9d, 0xf5, 0x42, 0xa7, 0x72, 0xbc, 0x47, 0x8e, 0xb1, + 0x12, 0xa2, 0x8c, 0xe4, 0xf1, 0x20, 0x9a, 0xf7, 0x39, 0x23, 0x06, 0x46, 0x5b, 0x2f, 0xd3, 0x5e, + 0x33, 0x94, 0x8d, 0x41, 0x6c, 0x1f, 0xf0, 0xd6, 0xeb, 0xb0, 0xab, 0xa6, 0x71, 0x1e, 0x6a, 0x59, + 0x70, 0xc1, 0x49, 0xf3, 0x21, 0x57, 0xba, 0x00, 0x10, 0x7e, 0x00, 0x6e, 0xee, 0x3b, 0x26, 0x1c, + 0x64, 0x1f, 0x91, 0x6c, 0xb6, 0xcf, 0xa8, 0xa0, 0x96, 0x30, 0xa8, 0xf2, 0x63, 0x6e, 0x09, 0xb2, + 0xc7, 0xb5, 0x06, 0x33, 0x79, 0x9c, 0x79, 0x9b, 0x83, 0x65, 0xed, 0x13, 0xce, 0x5a, 0x87, 0xad, + 0x64, 0xed, 0x34, 0xcc, 0x92, 0x71, 0xb0, 0xba, 0x7e, 0xca, 
0x8d, 0xb5, 0x43, 0x6f, 0x54, 0xab, + 0xfb, 0x20, 0x1c, 0x28, 0xd3, 0x79, 0x5e, 0xcb, 0x38, 0x43, 0xa6, 0x11, 0x79, 0x89, 0x83, 0xf9, + 0x3a, 0x99, 0xb9, 0xe3, 0x2f, 0x97, 0x82, 0x55, 0x2f, 0x41, 0xf9, 0xfd, 0xb0, 0x9f, 0xe5, 0x79, + 0x9c, 0xca, 0x96, 0xf2, 0xe3, 0xe0, 0x82, 0x6c, 0x3b, 0xa8, 0x3f, 0xeb, 0x29, 0xd5, 0x86, 0x81, + 0xa3, 0xf9, 0x24, 0xdc, 0x50, 0x9e, 0x55, 0x1a, 0x41, 0x94, 0xa8, 0x54, 0x5b, 0x8c, 0x9f, 0x73, + 0xa5, 0x4a, 0xee, 0x64, 0x81, 0x89, 0x65, 0x98, 0x28, 0x3e, 0xba, 0x3e, 0x92, 0x5f, 0x90, 0x68, + 0xbc, 0x4b, 0x51, 0xe3, 0x68, 0xa9, 0x28, 0xf1, 0x52, 0x97, 0xfe, 0xf7, 0x25, 0x37, 0x0e, 0x42, + 0xa8, 0x71, 0xe8, 0xad, 0x44, 0xe2, 0xb4, 0x77, 0x30, 0x7c, 0xc5, 0x8d, 0x83, 0x19, 0x52, 0xf0, + 0x81, 0xc1, 0x41, 0xf1, 0x35, 0x2b, 0x98, 0x41, 0xc5, 0x3d, 0xdd, 0x41, 0x9b, 0x4a, 0x3f, 0xc8, + 0x74, 0xea, 0xe1, 0x6a, 0x8b, 0xea, 0x9b, 0xed, 0xea, 0x21, 0x6c, 0xdd, 0x40, 0xc5, 0x29, 0x98, + 0xec, 0x39, 0x62, 0xd4, 0x6f, 0xdb, 0x63, 0x5b, 0x95, 0x59, 0xe6, 0xf9, 0xa5, 0xf0, 0xd1, 0x1d, + 0x6a, 0x46, 0xd5, 0x13, 0x86, 0xb8, 0x13, 0xeb, 0x5e, 0x3d, 0x07, 0xd8, 0x65, 0x17, 0x77, 0xca, + 0xd2, 0x57, 0x8e, 0x01, 0xe2, 0x04, 0x8c, 0x57, 0xce, 0x00, 0x76, 0xd5, 0x63, 0xa4, 0x1a, 0x33, + 0x8f, 0x00, 0xe2, 0x08, 0x0c, 0xe1, 0x3c, 0xb7, 0xe3, 0x8f, 0x13, 0x5e, 0x2c, 0x17, 0xc7, 0x60, + 0x84, 0xe7, 0xb8, 0x1d, 0x7d, 0x82, 0xd0, 0x12, 0x41, 0x9c, 0x67, 0xb8, 0x1d, 0x7f, 0x92, 0x71, + 0x46, 0x10, 0x77, 0x4f, 0xe1, 0xb7, 0x4f, 0x0f, 0x51, 0x1f, 0xe6, 0xdc, 0xcd, 0xc3, 0x30, 0x0d, + 0x6f, 0x3b, 0xfd, 0x14, 0xfd, 0x38, 0x13, 0xe2, 0x0e, 0xd8, 0xe7, 0x98, 0xf0, 0x67, 0x08, 0xed, + 0xac, 0x17, 0x4b, 0x30, 0x6a, 0x0c, 0x6c, 0x3b, 0xfe, 0x2c, 0xe1, 0x26, 0x85, 0xa1, 0xd3, 0xc0, + 0xb6, 0x0b, 0x9e, 0xe3, 0xd0, 0x89, 0xc0, 0xb4, 0xf1, 0xac, 0xb6, 0xd3, 0xcf, 0x73, 0xd6, 0x19, + 0x11, 0x0b, 0x50, 0x2b, 0xfb, 0xaf, 0x9d, 0x7f, 0x81, 0xf8, 0x2e, 0x83, 0x19, 0x30, 0xfa, 0xbf, + 0x5d, 0xf1, 0x22, 0x67, 0xc0, 0xa0, 0x70, 0x1b, 0xf5, 0xce, 0x74, 0xbb, 0xe9, 0x25, 0xde, 0x46, + 0x3d, 0x23, 0x1d, 0xab, 0x59, 0xb4, 0x41, 0xbb, 0xe2, 0x65, 0xae, 0x66, 0xb1, 0x1e, 0xc3, 0xe8, + 0x1d, 0x92, 0x76, 0xc7, 0x2b, 0x1c, 0x46, 0xcf, 0x8c, 0x14, 0x6b, 0x50, 0xdf, 0x3b, 0x20, 0xed, + 0xbe, 0x57, 0xc9, 0x37, 0xb5, 0x67, 0x3e, 0x8a, 0xfb, 0x60, 0xb6, 0xff, 0x70, 0xb4, 0x5b, 0x2f, + 0xed, 0xf4, 0xbc, 0xce, 0x98, 0xb3, 0x51, 0x9c, 0xee, 0x76, 0x59, 0x73, 0x30, 0xda, 0xb5, 0x97, + 0x77, 0xaa, 0x8d, 0xd6, 0x9c, 0x8b, 0x62, 0x11, 0xa0, 0x3b, 0x93, 0xec, 0xae, 0x2b, 0xe4, 0x32, + 0x20, 0xdc, 0x1a, 0x34, 0x92, 0xec, 0xfc, 0x55, 0xde, 0x1a, 0x44, 0xe0, 0xd6, 0xe0, 0x69, 0x64, + 0xa7, 0xaf, 0xf1, 0xd6, 0x60, 0x44, 0xcc, 0xc3, 0x48, 0x9c, 0x87, 0x21, 0x3e, 0x5b, 0xf5, 0x5b, + 0xfb, 0x8c, 0x1b, 0x19, 0xb6, 0x19, 0xfe, 0x65, 0x97, 0x60, 0x06, 0xc4, 0x11, 0xd8, 0x27, 0xa3, + 0xa6, 0x6c, 0xdb, 0xc8, 0x5f, 0x77, 0xb9, 0x9f, 0xe0, 0x6a, 0xb1, 0x00, 0xd0, 0x79, 0x99, 0xc6, + 0x28, 0x6c, 0xec, 0x6f, 0xbb, 0x9d, 0xf7, 0x7a, 0x03, 0xe9, 0x0a, 0x8a, 0xb7, 0x71, 0x8b, 0x60, + 0xbb, 0x2a, 0x28, 0x5e, 0xc0, 0x8f, 0xc2, 0xf0, 0x43, 0x99, 0x8a, 0xb5, 0xe7, 0xdb, 0xe8, 0xdf, + 0x89, 0xe6, 0xf5, 0x98, 0xb0, 0x48, 0xa5, 0x52, 0x7b, 0x7e, 0x66, 0x63, 0xff, 0x20, 0xb6, 0x04, + 0x10, 0x6e, 0x79, 0x99, 0x76, 0xb9, 0xef, 0x3f, 0x19, 0x66, 0x00, 0x83, 0xc6, 0xeb, 0x87, 0xe5, + 0x96, 0x8d, 0xfd, 0x8b, 0x83, 0xa6, 0xf5, 0xe2, 0x18, 0xd4, 0xf0, 0xb2, 0xf8, 0x3f, 0x84, 0x0d, + 0xfe, 0x9b, 0xe0, 0x2e, 0x81, 0xbf, 0x9c, 0xe9, 0xb6, 0x0e, 0xec, 0xc9, 0xfe, 0x87, 0x2a, 0xcd, + 0xeb, 0xc5, 0x22, 0x8c, 0x66, 0xba, 0xdd, 0xce, 0xe9, 0x44, 0x63, 0xc1, 0xff, 0xdd, 
0x2d, 0x5f, + 0x72, 0x4b, 0xe6, 0xf8, 0x21, 0x98, 0x6e, 0xa9, 0xa8, 0x17, 0x3c, 0x0e, 0x2b, 0x6a, 0x45, 0xad, + 0x15, 0xbb, 0xe8, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0a, 0x9c, 0xec, 0xd8, 0x50, 0x13, 0x00, + 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto index 18a58c5d..fbca44cd 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -39,6 +39,7 @@ extend google.protobuf.EnumOptions { optional bool goproto_enum_stringer = 62021; optional bool enum_stringer = 62022; optional string enum_customname = 62023; + optional bool enumdecl = 62024; } extend google.protobuf.EnumValueOptions { @@ -77,6 +78,10 @@ extend google.protobuf.FileOptions { optional bool gogoproto_import = 63027; optional bool protosizer_all = 63028; optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; } extend google.protobuf.MessageOptions { @@ -107,6 +112,8 @@ extend google.protobuf.MessageOptions { optional bool protosizer = 64028; optional bool compare = 64029; + + optional bool typedecl = 64030; } extend google.protobuf.FieldOptions { @@ -119,4 +126,7 @@ extend google.protobuf.FieldOptions { optional string casttype = 65007; optional string castkey = 65008; optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/helper.go index 670021fe..6b851c56 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -39,6 +39,14 @@ func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { return proto.GetBoolExtension(field.Options, E_Nullable, true) } +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { nullable := IsNullable(field) if field.IsMessage() || IsCustomType(field) { @@ -82,7 +90,18 @@ func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { return false } +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Customtype) if err == nil && v.(*string) != nil { @@ -93,6 +112,9 @@ func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { } func GetCastType(field 
*google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Casttype) if err == nil && v.(*string) != nil { @@ -103,6 +125,9 @@ func GetCastType(field *google_protobuf.FieldDescriptorProto) string { } func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Castkey) if err == nil && v.(*string) != nil { @@ -113,6 +138,9 @@ func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { } func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Castvalue) if err == nil && v.(*string) != nil { @@ -147,6 +175,9 @@ func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool } func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Customname) if err == nil && v.(*string) != nil { @@ -157,6 +188,9 @@ func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { } func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_EnumCustomname) if err == nil && v.(*string) != nil { @@ -167,6 +201,9 @@ func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { } func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) if err == nil && v.(*string) != nil { @@ -177,6 +214,9 @@ func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) str } func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Jsontag) if err == nil && v.(*string) != nil { @@ -187,6 +227,9 @@ func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { } func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Moretags) if err == nil && v.(*string) != nil { @@ -308,3 +351,7 @@ func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) } + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/Makefile b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/Makefile index 23a6b173..41c71757 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/Makefile +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/Makefile @@ -39,5 +39,5 @@ test: install generate-test-pbs generate-test-pbs: make install make -C testdata - protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. 
proto3_proto/proto3.proto + protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto make diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/decode.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/decode.go index 0d6634cc..737f2731 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/decode.go @@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 @@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) { return 0, 0 } -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { i := p.index l := len(p.buf) @@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { return } +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. @@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. 
if u, ok := pb.(Unmarshaler); ok { diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/decode_gogo.go index ecc63873..6fb74de4 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -98,7 +98,7 @@ func setPtrCustomType(base structPointer, f field, v interface{}) { if v == nil { return } - structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer())) + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) } func setCustomType(base structPointer, f field, value interface{}) { @@ -165,7 +165,8 @@ func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error } newBas := appendStructPointer(base, p.field, p.ctype) - setCustomType(newBas, 0, custom) + var zero field + setCustomType(newBas, zero, custom) return nil } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/duration.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 00000000..93464c91 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. 
+ maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 00000000..18e2a5f7 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,203 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/encode.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/encode.go index 8c1b8fd1..8b84d1b2 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) } func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) { } p := NewBuffer(nil) err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } if p.buf == nil && err == nil { // Return a non-nil slice on success. 
return []byte{}, nil @@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { data, err := m.Marshal() - if err != nil { - return err - } p.buf = append(p.buf, data...) - return nil + return err } t, base, err := getbase(pb) @@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error { } if collectStats { - stats.Encode++ + (stats).Encode++ // Parens are to work around a goimports bug. } if len(p.buf) > maxMarshalSize { @@ -309,7 +302,7 @@ func Size(pb Message) (n int) { } if collectStats { - stats.Size++ + (stats).Size++ // Parens are to work around a goimports bug. } return @@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) { if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() - n += len(p.tagcode) n += sizeRawBytes(data) continue } @@ -1083,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { return err } - v, _ := exts.extensionsRead() return o.enc_map_body(v) } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index 66e7e163..32111b7f 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -196,12 +196,10 @@ func size_ref_struct_message(p *Properties, base structPointer) int { // Encode a slice of references to message struct pointers ([]struct). 
func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { var state errorState - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) + structp := ss.Index(i) if structPointer_IsNil(structp) { return errRepeatedHasNil } @@ -233,13 +231,11 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) //TODO this is only copied, please fix this func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() n += l * len(p.tagcode) for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) + structp := ss.Index(i) if structPointer_IsNil(structp) { return // return the size up to this point } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/equal.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/equal.go index 8b16f951..2ed1cf59 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/equal.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/equal.go @@ -54,13 +54,17 @@ Equality is defined in this way: in a proto3 .proto file, fields are not "set"; specifically, zero length proto3 "bytes" fields are equal (nil == {}). - Two repeated fields are equal iff their lengths are the same, - and their corresponding elements are equal (a "bytes" field, - although represented by []byte, is not a repeated field) + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. - Two unset fields are equal. - Two unknown field sets are equal if their current encoded state is equal. - Two extension sets are equal iff they have corresponding elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. - Every other combination of things are not equal. The return value is undefined if a and b are not protocol buffers. 
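The equal.go doc comment above now spells out how proto.Equal treats zero-length proto3 bytes fields and map fields. The following is an illustrative sketch (not part of the patch) of those rules in action; the Pair type is hand-rolled purely for the example, with struct tags mimicking protoc-gen-gogo output, and the canonical import path github.com/gogo/protobuf/proto is assumed rather than the vendored path shown in this diff.

package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

// Pair stands in for a generated proto3 message; real code would use
// protoc-gen-gogo output instead of a hand-written struct.
type Pair struct {
	Data  []byte            `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
	Attrs map[string]string `protobuf:"bytes,2,rep,name=attrs" json:"attrs,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
}

func (m *Pair) Reset()         { *m = Pair{} }
func (m *Pair) String() string { return proto.CompactTextString(m) }
func (*Pair) ProtoMessage()    {}

func main() {
	a := &Pair{Data: nil, Attrs: map[string]string{}}
	b := &Pair{Data: []byte{}, Attrs: nil}
	// Per the documented rules: a zero-length proto3 bytes field compares
	// equal whether it is nil or empty, and zero-length map fields are equal.
	fmt.Println(proto.Equal(a, b)) // expected: true
}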
diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/extensions.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/extensions.go index f7384baa..0dfcb538 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/extensions.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -167,6 +167,7 @@ type ExtensionDesc struct { Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { @@ -587,6 +588,9 @@ func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { registeredExtensions := RegisteredExtensions(pb) emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } mu.Lock() defer mu.Unlock() extensions := make([]*ExtensionDesc, 0, len(emap)) diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/lib.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/lib.go index 2c30d709..c98d73da 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/lib.go @@ -73,7 +73,6 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. @@ -308,7 +307,7 @@ func GetStats() Stats { return stats } // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream - index int // write point + index int // read point // pools of basic types to amortize allocation. bools []bool diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 00000000..1763a5f2 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,85 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index 132ea4df..f156a29f 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -26,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine +// +build !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. 
@@ -72,16 +72,13 @@ func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { size := typ.Elem().Size() + oldHeader := structPointer_GetSliceHeader(base, f) + oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() newLen := oldHeader.Len + 1 - slice := reflect.MakeSlice(typ, newLen, newLen) - bas := toStructPointer(slice) - for i := 0; i < oldHeader.Len; i++ { - newElemptr := uintptr(bas) + uintptr(i)*size - oldElemptr := oldHeader.Data + uintptr(i)*size - copyUintPtr(oldElemptr, newElemptr, int(size)) - } - + newSlice := reflect.MakeSlice(typ, newLen, newLen) + reflect.Copy(newSlice, oldSlice) + bas := toStructPointer(newSlice) oldHeader.Data = uintptr(bas) oldHeader.Len = newLen oldHeader.Cap = newLen @@ -108,3 +105,24 @@ func structPointer_Add(p structPointer, size field) structPointer { func structPointer_Len(p structPointer, f field) int { return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) } + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). +type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/properties.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/properties.go index 3e4cad03..2a69e886 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/properties.go @@ -190,10 +190,12 @@ type Properties struct { proto3 bool // whether this is known to be a proto3 field; set for []byte only oneof bool // whether this is a oneof field - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - def_uint64 uint64 + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool enc encoder valEnc valueEncoder // set for bool and numeric types only @@ -340,6 +342,12 @@ func (p *Properties) Parse(s string) { p.OrigName = strings.Split(f, "=")[1] case strings.HasPrefix(f, "customtype="): p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true } } } @@ -355,11 +363,22 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.enc = nil p.dec = nil p.size = nil - if len(p.CustomType) > 0 { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { p.setCustomEncAndDec(typ) p.setTag(lockGetProp) return } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } switch t1 := typ; t1.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", 
t1) @@ -630,6 +649,10 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock // so we need encoders for the pointer to this type. vtype = reflect.PtrTo(vtype) } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } p.setTag(lockGetProp) @@ -920,7 +943,15 @@ func RegisterType(x Message, name string) { } // MessageName returns the fully-qualified proto name for the given message type. -func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} // MessageType returns the message type (pointer to struct) for a named message. func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/properties_gogo.go index 4607a975..b6b7176c 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -51,6 +51,51 @@ func (p *Properties) setCustomEncAndDec(typ reflect.Type) { } } +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { t2 := typ.Elem() p.sstype = typ diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text.go index b3e12e26..f609d1d4 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text.go @@ -51,6 +51,7 @@ import ( "sort" "strings" "sync" + "time" ) var ( @@ -181,7 +182,93 @@ type raw interface { Bytes() []byte } -func writeStruct(w *textWriter, sv reflect.Value) error { +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -234,10 +321,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error { continue } if len(props.Enum) > 0 { - if err := writeEnum(w, v, props); err != nil { + if err := tm.writeEnum(w, v, props); err != nil { return err } - } else if err := writeAny(w, v, props); err != nil { + } else if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -279,7 +366,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -296,7 +383,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -368,10 +455,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } if len(props.Enum) > 0 { - if err := writeEnum(w, fv, props); err != nil { + if err := tm.writeEnum(w, fv, props); err != nil { return err } - } else if err := writeAny(w, fv, props); err != nil { + } else if err := tm.writeAny(w, fv, props); err != nil { return err } @@ -389,7 +476,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { pv.Elem().Set(sv) } if pv.Type().Implements(extensionRangeType) { - if err := writeExtensions(w, 
pv); err != nil { + if err := tm.writeExtensions(w, pv); err != nil { return err } } @@ -419,20 +506,56 @@ func writeRaw(w *textWriter, b []byte) error { } // writeAny writes an arbitrary field. -func writeAny(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) - if props != nil && len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) if err != nil { return err } - if err := writeString(w, string(data)); err != nil { - return err + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) } - return nil + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err } } @@ -482,15 +605,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { } } w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } - } else if err := writeStruct(w, v); err != nil { + } else if err := tm.writeStruct(w, v); err != nil { return err } w.unindent() @@ -634,7 +757,7 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] e := pv.Interface().(Message) @@ -689,13 +812,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { // Repeated extensions will appear as a slice. 
if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } @@ -704,7 +827,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { return nil } -func writeExtension(w *textWriter, name string, pb interface{}) error { +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } @@ -713,7 +836,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error { return err } } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -740,12 +863,13 @@ func (w *textWriter) writeIndent() { // TextMarshaler is a configurable text format marshaler. type TextMarshaler struct { - Compact bool // use compact text format (one line). + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types } // Marshal writes a given protocol buffer in text format. // The only errors returned are from w. -func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) @@ -760,11 +884,11 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { aw := &textWriter{ w: ww, complete: true, - compact: m.Compact, + compact: tm.Compact, } - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } @@ -778,7 +902,7 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { + if err := tm.writeStruct(aw, v); err != nil { return err } if bw != nil { @@ -788,9 +912,9 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Text is the same as Marshal, but returns the string directly. 
-func (m *TextMarshaler) Text(pb Message) string { +func (tm *TextMarshaler) Text(pb Message) string { var buf bytes.Buffer - m.Marshal(&buf, pb) + tm.Marshal(&buf, pb) return buf.String() } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text_gogo.go index 58926741..1d6c6aa0 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -33,10 +33,10 @@ import ( "reflect" ) -func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { m, ok := enumStringMaps[props.Enum] if !ok { - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } } @@ -48,7 +48,7 @@ func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { } s, ok := m[key] if !ok { - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text_parser.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text_parser.go index bcd732c3..f1276729 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -46,9 +46,13 @@ import ( "reflect" "strconv" "strings" + "time" "unicode/utf8" ) +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + type ParseError struct { Message string Line int // 1-based line number @@ -168,7 +172,7 @@ func (p *textParser) advance() { p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',': + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': @@ -456,7 +460,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be - // "[extension]". + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > for { tok := p.next() if tok.err != nil { @@ -466,33 +473,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { break } if tok.value == "[" { - // Looks like an extension. + // Looks like an extension or an Any. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). - tok = p.next() - if tok.err != nil { - return tok.err + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue } + var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { + if d.Name == extName { desc = d break } } if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. - tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) + return p.errorf("unrecognized extension %q", extName) } props := &Properties{} @@ -550,7 +598,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { props = oop.Prop nv := reflect.New(oop.Type.Elem()) dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) } if !dst.IsValid() { return p.errorf("unknown field name %q in %v", name, st) @@ -657,6 +709,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. 
func (p *textParser) consumeOptionalSeparator() error { @@ -717,6 +798,80 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { } return nil } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := &timestamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } switch fv := v; fv.Kind() { case reflect.Slice: at := v.Type() @@ -759,12 +914,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) return p.readAny(fv.Index(fv.Len()-1), props) case reflect.Bool: - // Either "true", "false", 1 or 0. + // true/1/t/True or false/f/0/False. switch tok.value { - case "true", "1": + case "true", "1", "t", "True": fv.SetBool(true) return nil - case "false", "0": + case "false", "0", "f", "False": fv.SetBool(false) return nil } @@ -828,7 +983,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) + fv.SetUint(x) return nil } case reflect.Uint64: diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/timestamp.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 00000000..9324f654 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. 
+ var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 00000000..d4276474 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,229 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} + +func (o *Buffer) decTimestamp() (time.Time, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return time.Time{}, err + } + tproto := &timestamp{} + if err := Unmarshal(b, tproto); err != nil { + return time.Time{}, err + } + return timestampFromProto(tproto) +} + +func (o *Buffer) dec_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setPtrCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) + var zero field + setPtrCustomType(newBas, zero, &t) + return nil +} + +func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) + var zero field + setCustomType(newBas, zero, &t) + return nil +} + +func size_time(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_time(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_time(p *Properties, base structPointer) (n int) { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile index d80ceffe..3496dc99 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -30,4 +30,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 00000000..a85bf198 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. +func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
+func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go index 6b361d6a..82623f04 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: descriptor.proto -// DO NOT EDIT! /* Package descriptor is a generated protocol buffer package. @@ -12,6 +11,7 @@ It has these top-level messages: FileDescriptorSet FileDescriptorProto DescriptorProto + ExtensionRangeOptions FieldDescriptorProto OneofDescriptorProto EnumDescriptorProto @@ -21,12 +21,14 @@ It has these top-level messages: FileOptions MessageOptions FieldOptions + OneofOptions EnumOptions EnumValueOptions ServiceOptions MethodOptions UninterpretedOption SourceCodeInfo + GeneratedCodeInfo */ package descriptor @@ -63,6 +65,10 @@ const ( FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // New in version 2. @@ -133,7 +139,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{3, 0} + return fileDescriptorDescriptor, []int{4, 0} } type FieldDescriptorProto_Label int32 @@ -173,7 +179,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{3, 1} + return fileDescriptorDescriptor, []int{4, 1} } // Generated classes can be optimized for speed or code size. 
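
As a quick usage sketch for the new descriptor helpers above: the generated descriptor types are themselves proto messages with Descriptor() methods, so FileDescriptorProto can serve as its own input to ForMessage. The snippet assumes the canonical gogo/protobuf import path rather than the vendored one and is illustrative only.

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
    )

    func main() {
        // ForMessage unpacks the gzip'd FileDescriptorProto embedded in the
        // generated code and walks the path to the message descriptor.
        fd, md := descriptor.ForMessage(&descriptor.FileDescriptorProto{})
        fmt.Printf("file=%s message=%s\n", fd.GetName(), md.GetName())

        // IsScalar reports whether a field is one of the scalar numeric types.
        for _, f := range md.GetField() {
            fmt.Printf("  %s scalar=%v\n", f.GetName(), f.IsScalar())
        }
    }
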
@@ -214,7 +220,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { return nil } func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{9, 0} + return fileDescriptorDescriptor, []int{10, 0} } type FieldOptions_CType int32 @@ -254,7 +260,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{11, 0} + return fileDescriptorDescriptor, []int{12, 0} } type FieldOptions_JSType int32 @@ -296,7 +302,49 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{11, 1} + return fileDescriptorDescriptor, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -528,9 +576,10 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } @@ -554,6 +603,13 @@ func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { return 0 } +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + // Range of reserved tag numbers. Reserved tag numbers may not be used by // fields or extension ranges in the same message. Reserved ranges may // not overlap. 
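
The new MethodOptions_IdempotencyLevel enum behaves like any other generated proto2 enum. The sketch below assumes the MethodOptions.IdempotencyLevel field and GetIdempotencyLevel getter that descriptor.proto generates alongside the enum, and again uses the canonical import path rather than the vendored one.

    package main

    import (
        "fmt"

        "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
    )

    func main() {
        // Enum() returns a pointer, which is how optional proto2 fields are set.
        opts := &descriptor.MethodOptions{
            IdempotencyLevel: descriptor.MethodOptions_NO_SIDE_EFFECTS.Enum(),
        }

        // String() resolves through the generated name map; prints NO_SIDE_EFFECTS.
        fmt.Println(opts.GetIdempotencyLevel().String())
    }
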
@@ -584,6 +640,33 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + // Describes a field within a message. type FieldDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -622,7 +705,7 @@ type FieldDescriptorProto struct { func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -696,14 +779,15 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -712,6 +796,13 @@ func (m *OneofDescriptorProto) GetName() string { return "" } +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + // Describes an enum type. 
type EnumDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -723,7 +814,7 @@ type EnumDescriptorProto struct { func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{6} } func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -758,7 +849,7 @@ func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorPro func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{6} + return fileDescriptorDescriptor, []int{7} } func (m *EnumValueDescriptorProto) GetName() string { @@ -793,7 +884,7 @@ type ServiceDescriptorProto struct { func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{7} } +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -834,7 +925,7 @@ type MethodDescriptorProto struct { func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -900,19 +991,8 @@ type FileOptions struct { // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // If set true, then the Java code generator will generate equals() and - // hashCode() methods for all messages defined in the .proto file. - // This increases generated code size, potentially substantially for large - // protos, which may harm a memory-constrained application. - // - In the full runtime this is a speed optimization, as the - // AbstractMessage base class includes reflection-based implementations of - // these methods. - // - In the lite runtime, setting this option changes the semantics of - // equals() and hashCode() to more closely match those of the full runtime; - // the generated methods compute their results based on field values rather - // than object identity. (Implementations should not assume that hashcodes - // will be consistent across runtimes or versions of the protocol compiler.) 
- JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. @@ -940,6 +1020,7 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,19,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -953,9 +1034,18 @@ type FileOptions struct { ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` // Namespace for generated classes; defaults to the package. CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // Whether the nano proto compiler should generate in the deprecated non-nano - // suffixed package. - JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -965,10 +1055,10 @@ type FileOptions struct { func (m *FileOptions) Reset() { *m = FileOptions{} } func (m *FileOptions) String() string { return proto.CompactTextString(m) } func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -976,12 +1066,12 @@ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaGenerateEqualsAndHash bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED const Default_FileOptions_CcGenericServices bool = false const Default_FileOptions_JavaGenericServices bool = false const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false const Default_FileOptions_Deprecated bool = false const Default_FileOptions_CcEnableArenas bool = false @@ -1010,7 +1100,7 @@ func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash } - return Default_FileOptions_JavaGenerateEqualsAndHash + return false } func (m *FileOptions) GetJavaStringCheckUtf8() bool { @@ -1055,6 +1145,13 @@ func (m *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + func (m *FileOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { return *m.Deprecated @@ -1083,11 +1180,25 @@ func (m *FileOptions) GetCsharpNamespace() string { return "" } -func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool { - if m != nil && m.JavananoUseDeprecatedPackage != nil { - return *m.JavananoUseDeprecatedPackage +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix } - return false + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" } func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { @@ -1157,10 +1268,10 @@ type MessageOptions struct { func (m *MessageOptions) Reset() { *m = MessageOptions{} } func (m *MessageOptions) String() string { return proto.CompactTextString(m) } func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ 
-1220,13 +1331,15 @@ type FieldOptions struct { Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). By default these types are - // represented as JavaScript strings. This avoids loss of precision that can - // happen when a large value is converted to a floating point JavaScript - // numbers. Specifying JS_NUMBER for the jstype causes the generated - // JavaScript code to use the JavaScript "number" type instead of strings. - // This option is an enum to permit additional types to be added, - // e.g. goog.math.Integer. + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` // Should this field be parsed lazily? Lazy applies only to message-type // fields. It means that when the outer message is initially parsed, the @@ -1247,7 +1360,7 @@ type FieldOptions struct { // // // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outher message + // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy @@ -1273,10 +1386,10 @@ type FieldOptions struct { func (m *FieldOptions) Reset() { *m = FieldOptions{} } func (m *FieldOptions) String() string { return proto.CompactTextString(m) } func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1338,6 +1451,33 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { return nil } +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
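// ---- illustrative sketch added by the editor, not part of the vendored patch ----
// The reworded jstype comment earlier in this hunk boils down to: 64-bit
// integer fields surface to JavaScript as strings (JS_STRING) unless the field
// opts into JS_NUMBER, trading precision for a native number, with JS_NORMAL
// left implementation-defined. In Go the option is just an enum-valued pointer
// on FieldOptions; a minimal, hypothetical construction:
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	field := &descriptor.FieldDescriptorProto{
		Name:   proto.String("request_id"),
		Number: proto.Int32(1),
		Type:   descriptor.FieldDescriptorProto_TYPE_UINT64.Enum(),
		Options: &descriptor.FieldOptions{
			Jstype: descriptor.FieldOptions_JS_STRING.Enum(),
		},
	}
	// GetJstype falls back to JS_NORMAL when the option is unset.
	fmt.Println(field.GetOptions().GetJstype()) // JS_STRING
}
// ---- end of sketch; the new OneofOptions message continues below ----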
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + type EnumOptions struct { // Set this option to true to allow mapping different tag names to the same // value. @@ -1356,10 +1496,10 @@ type EnumOptions struct { func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1404,10 +1544,10 @@ type EnumValueOptions struct { func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1445,10 +1585,10 @@ type ServiceOptions struct { func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1476,7 +1616,8 @@ type MethodOptions struct { // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
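// ---- illustrative sketch added by the editor, not part of the vendored patch ----
// idempotency_level is the new MethodOptions field declared just above; its
// getter falls back to Default_MethodOptions_IdempotencyLevel
// (IDEMPOTENCY_UNKNOWN) when unset. A minimal, hypothetical usage, assuming
// the generated NO_SIDE_EFFECTS enum value registered later in this file:
package main

import (
	"fmt"

	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	var unset descriptor.MethodOptions
	fmt.Println(unset.GetIdempotencyLevel()) // IDEMPOTENCY_UNKNOWN

	set := &descriptor.MethodOptions{
		IdempotencyLevel: descriptor.MethodOptions_NO_SIDE_EFFECTS.Enum(),
	}
	fmt.Println(set.GetIdempotencyLevel()) // NO_SIDE_EFFECTS
}
// ---- end of sketch; the generated MethodOptions message continues below ----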
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -1486,10 +1627,10 @@ type MethodOptions struct { func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1497,6 +1638,7 @@ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN func (m *MethodOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { @@ -1505,6 +1647,13 @@ func (m *MethodOptions) GetDeprecated() bool { return Default_MethodOptions_Deprecated } +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1534,7 +1683,7 @@ type UninterpretedOption struct { func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{18} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1600,7 +1749,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{16, 0} + return fileDescriptorDescriptor, []int{18, 0} } func (m *UninterpretedOption_NamePart) GetNamePart() string { @@ -1670,7 +1819,7 @@ type SourceCodeInfo struct { func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1767,7 +1916,7 @@ func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{17, 0} + return fileDescriptorDescriptor, []int{19, 0} } func (m *SourceCodeInfo_Location) 
GetPath() []int32 { @@ -1805,12 +1954,86 @@ func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { return nil } +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. +type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{20} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
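// ---- illustrative sketch added by the editor, not part of the vendored patch ----
// GeneratedCodeInfo_Annotation, added above, ties a [begin, end) byte range in
// a generated file back to a path in the .proto that produced it; as the
// comment says, end is one past the last byte, so end-begin is the span
// length. A minimal, hypothetical round trip (values are made up):
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	generated := []byte("type FileDescriptorSet struct { ... }")
	ann := &descriptor.GeneratedCodeInfo_Annotation{
		Path:       []int32{4, 0}, // e.g. message_type[0]; same format as SourceCodeInfo.Location.path
		SourceFile: proto.String("descriptor.proto"),
		Begin:      proto.Int32(5),
		End:        proto.Int32(22),
	}
	span := generated[ann.GetBegin():ann.GetEnd()]
	fmt.Printf("%q (%d bytes)\n", span, ann.GetEnd()-ann.GetBegin()) // "FileDescriptorSet" (17 bytes)
}
// ---- end of sketch; the Annotation message continues below ----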
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + func init() { proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") @@ -1820,6 +2043,7 @@ func init() { proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") @@ -1828,152 +2052,172 @@ func init() { proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + 
proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } var fileDescriptorDescriptor = []byte{ - // 2192 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xd6, - 0x11, 0x2f, 0xff, 0x8a, 0x5c, 0x52, 0x24, 0xf4, 0xac, 0xd8, 0xb4, 0x62, 0xc7, 0x31, 0x63, 0xc7, - 0x8e, 0xd3, 0xd2, 0x19, 0xb7, 0x49, 0x5c, 0xa5, 0x93, 0x0e, 0x45, 0xc2, 0x0a, 0x3d, 0x94, 0xc8, - 0x3e, 0x92, 0xad, 0x93, 0x0b, 0x06, 0x02, 0x1f, 0x29, 0xd8, 0x20, 0xc0, 0x02, 0xa0, 0x6d, 0xe5, - 0xd4, 0x99, 0x9e, 0xfa, 0x0d, 0x3a, 0x6d, 0xa7, 0x87, 0x5c, 0x32, 0xd3, 0x0f, 0xd0, 0x43, 0xef, - 0xbd, 0xf6, 0xd0, 0x73, 0x8f, 0x9d, 0x69, 0xbf, 0x41, 0xaf, 0xdd, 0xf7, 0x1e, 0x00, 0x02, 0x24, - 0x15, 0xab, 0x99, 0x49, 0x13, 0x5d, 0xc4, 0xb7, 0xfb, 0xdb, 0xc5, 0xbe, 0x7d, 0xbf, 0xb7, 0xbb, - 0x00, 0x28, 0x63, 0xe6, 0x19, 0xae, 0x39, 0xf7, 0x1d, 0xb7, 0x31, 0x77, 0x1d, 0xdf, 0x21, 0xd5, - 0xa9, 0xe3, 0x4c, 0x2d, 0x26, 0x57, 0x27, 0x8b, 0x49, 0xfd, 0x08, 0x76, 0x1e, 0x99, 0x16, 0x6b, - 0x47, 0xc0, 0x01, 0xf3, 0xc9, 0x43, 0xc8, 0x4e, 0x50, 0x58, 0x4b, 0xbd, 0x99, 0xb9, 0x5b, 0x7a, - 0x70, 0xab, 0xb1, 0x62, 0xd4, 0x48, 0x5a, 0xf4, 0xb9, 0x98, 0x0a, 0x8b, 0xfa, 0x3f, 0xb3, 0x70, - 0x69, 0x83, 0x96, 0x10, 0xc8, 0xda, 0xfa, 0x8c, 0x7b, 0x4c, 0xdd, 0x2d, 0x52, 0xf1, 0x9b, 0xd4, - 0x60, 0x6b, 0xae, 0x1b, 0xcf, 0xf4, 0x29, 0xab, 0xa5, 0x85, 0x38, 0x5c, 0x92, 0x37, 0x00, 0xc6, - 0x6c, 0xce, 0xec, 0x31, 0xb3, 0x8d, 0xb3, 0x5a, 0x06, 0xa3, 0x28, 0xd2, 0x98, 0x84, 0xbc, 0x0b, - 0x3b, 0xf3, 0xc5, 0x89, 0x65, 0x1a, 0x5a, 0x0c, 0x06, 0x08, 0xcb, 0x51, 0x45, 0x2a, 0xda, 0x4b, - 0xf0, 0x1d, 0xa8, 0xbe, 0x60, 0xfa, 0xb3, 0x38, 0xb4, 0x24, 0xa0, 0x15, 0x2e, 0x8e, 0x01, 0x5b, - 0x50, 0x9e, 0x31, 0xcf, 0xc3, 0x00, 0x34, 0xff, 0x6c, 0xce, 0x6a, 0x59, 0xb1, 0xfb, 0x37, 0xd7, - 0x76, 0xbf, 0xba, 0xf3, 0x52, 0x60, 0x35, 0x44, 0x23, 0xd2, 0x84, 0x22, 0xb3, 0x17, 0x33, 0xe9, - 0x21, 0x77, 0x4e, 0xfe, 0x54, 0x44, 0xac, 0x7a, 0x29, 0x70, 0xb3, 0xc0, 0xc5, 0x96, 0xc7, 0xdc, - 0xe7, 0xa6, 0xc1, 0x6a, 0x79, 0xe1, 0xe0, 0xce, 0x9a, 0x83, 0x81, 0xd4, 0xaf, 0xfa, 0x08, 0xed, - 0x70, 0x2b, 0x45, 0xf6, 0xd2, 0x67, 0xb6, 0x67, 0x3a, 0x76, 0x6d, 0x4b, 0x38, 0xb9, 0xbd, 0xe1, - 0x14, 0x99, 0x35, 0x5e, 0x75, 0xb1, 0xb4, 0x23, 0x1f, 0xc0, 0x96, 0x33, 0xf7, 0xf1, 0x97, 0x57, - 0x2b, 0xe0, 0xf9, 0x94, 0x1e, 0x5c, 0xdb, 0x48, 0x84, 0x9e, 0xc4, 0xd0, 0x10, 0x4c, 0x3a, 0xa0, - 0x78, 0xce, 0xc2, 0x35, 0x98, 0x66, 0x38, 0x63, 0xa6, 0x99, 0xf6, 0xc4, 0xa9, 0x15, 0x85, 0x83, - 0x1b, 0xeb, 0x1b, 0x11, 0xc0, 0x16, 0xe2, 0x3a, 0x08, 0xa3, 0x15, 0x2f, 0xb1, 0x26, 0x97, 0x21, - 0xef, 0x9d, 0xd9, 0xbe, 0xfe, 0xb2, 0x56, 0x16, 0x0c, 0x09, 0x56, 0xf5, 0xff, 0xe4, 0xa0, 0x7a, - 0x11, 0x8a, 0x7d, 0x04, 0xb9, 0x09, 0xdf, 0x25, 0x12, 0xec, 0x7f, 0xc8, 0x81, 0xb4, 0x49, 0x26, - 0x31, 0xff, 0x35, 0x93, 0xd8, 0x84, 0x92, 0xcd, 0x3c, 0x9f, 0x8d, 0x25, 0x23, 0x32, 0x17, 0xe4, - 0x14, 0x48, 0xa3, 0x75, 0x4a, 0x65, 0xbf, 0x16, 0xa5, 0x9e, 0x40, 0x35, 0x0a, 0x49, 0x73, 0x75, - 0x7b, 0x1a, 0x72, 0xf3, 0xfe, 0xab, 0x22, 0x69, 0xa8, 0xa1, 0x1d, 0xe5, 0x66, 0xb4, 0xc2, 0x12, - 0x6b, 0xd2, 0x06, 0x70, 0x6c, 0xe6, 0x4c, 0xf0, 0x7a, 0x19, 0x16, 0xf2, 0x64, 0x73, 0x96, 0x7a, - 0x1c, 0xb2, 0x96, 0x25, 0x47, 0x4a, 0x0d, 0x8b, 0xfc, 0x78, 0x49, 0xb5, 0xad, 0x73, 0x98, 0x72, - 0x24, 0x2f, 0xd9, 0x1a, 0xdb, 0x46, 0x50, 0x71, 0x19, 0xe7, 0x3d, 0xa6, 0x58, 0xee, 0xac, 0x28, - 
0x82, 0x68, 0xbc, 0x72, 0x67, 0x34, 0x30, 0x93, 0x1b, 0xdb, 0x76, 0xe3, 0x4b, 0xf2, 0x16, 0x44, - 0x02, 0x4d, 0xd0, 0x0a, 0x44, 0x15, 0x2a, 0x87, 0xc2, 0x63, 0x94, 0xed, 0x3d, 0x84, 0x4a, 0x32, - 0x3d, 0x64, 0x17, 0x72, 0x9e, 0xaf, 0xbb, 0xbe, 0x60, 0x61, 0x8e, 0xca, 0x05, 0x51, 0x20, 0x83, - 0x45, 0x46, 0x54, 0xb9, 0x1c, 0xe5, 0x3f, 0xf7, 0x3e, 0x84, 0xed, 0xc4, 0xe3, 0x2f, 0x6a, 0x58, - 0xff, 0x6d, 0x1e, 0x76, 0x37, 0x71, 0x6e, 0x23, 0xfd, 0xf1, 0xfa, 0x20, 0x03, 0x4e, 0x98, 0x8b, - 0xbc, 0xe3, 0x1e, 0x82, 0x15, 0x32, 0x2a, 0x67, 0xe9, 0x27, 0xcc, 0x42, 0x36, 0xa5, 0xee, 0x56, - 0x1e, 0xbc, 0x7b, 0x21, 0x56, 0x37, 0xba, 0xdc, 0x84, 0x4a, 0x4b, 0xf2, 0x31, 0x64, 0x83, 0x12, - 0xc7, 0x3d, 0xdc, 0xbb, 0x98, 0x07, 0xce, 0x45, 0x2a, 0xec, 0xc8, 0xeb, 0x50, 0xe4, 0xff, 0x65, - 0x6e, 0xf3, 0x22, 0xe6, 0x02, 0x17, 0xf0, 0xbc, 0x92, 0x3d, 0x28, 0x08, 0x9a, 0x8d, 0x59, 0xd8, - 0x1a, 0xa2, 0x35, 0x3f, 0x98, 0x31, 0x9b, 0xe8, 0x0b, 0xcb, 0xd7, 0x9e, 0xeb, 0xd6, 0x82, 0x09, - 0xc2, 0xe0, 0xc1, 0x04, 0xc2, 0x9f, 0x73, 0x19, 0xb9, 0x01, 0x25, 0xc9, 0x4a, 0x13, 0x6d, 0x5e, - 0x8a, 0xea, 0x93, 0xa3, 0x92, 0xa8, 0x1d, 0x2e, 0xe1, 0x8f, 0x7f, 0xea, 0xe1, 0x5d, 0x08, 0x8e, - 0x56, 0x3c, 0x82, 0x0b, 0xc4, 0xe3, 0x3f, 0x5c, 0x2d, 0x7c, 0xd7, 0x37, 0x6f, 0x6f, 0x95, 0x8b, - 0xf5, 0x3f, 0xa7, 0x21, 0x2b, 0xee, 0x5b, 0x15, 0x4a, 0xc3, 0x4f, 0xfb, 0xaa, 0xd6, 0xee, 0x8d, - 0x0e, 0xba, 0xaa, 0x92, 0x22, 0x15, 0x00, 0x21, 0x78, 0xd4, 0xed, 0x35, 0x87, 0x4a, 0x3a, 0x5a, - 0x77, 0x8e, 0x87, 0x1f, 0xfc, 0x48, 0xc9, 0x44, 0x06, 0x23, 0x29, 0xc8, 0xc6, 0x01, 0x3f, 0x7c, - 0xa0, 0xe4, 0x90, 0x09, 0x65, 0xe9, 0xa0, 0xf3, 0x44, 0x6d, 0x23, 0x22, 0x9f, 0x94, 0x20, 0x66, - 0x8b, 0x6c, 0x43, 0x51, 0x48, 0x0e, 0x7a, 0xbd, 0xae, 0x52, 0x88, 0x7c, 0x0e, 0x86, 0xb4, 0x73, - 0x7c, 0xa8, 0x14, 0x23, 0x9f, 0x87, 0xb4, 0x37, 0xea, 0x2b, 0x10, 0x79, 0x38, 0x52, 0x07, 0x83, - 0xe6, 0xa1, 0xaa, 0x94, 0x22, 0xc4, 0xc1, 0xa7, 0x43, 0x75, 0xa0, 0x94, 0x13, 0x61, 0xe1, 0x23, - 0xb6, 0xa3, 0x47, 0xa8, 0xc7, 0xa3, 0x23, 0xa5, 0x42, 0x76, 0x60, 0x5b, 0x3e, 0x22, 0x0c, 0xa2, - 0xba, 0x22, 0xc2, 0x48, 0x95, 0x65, 0x20, 0xd2, 0xcb, 0x4e, 0x42, 0x80, 0x08, 0x52, 0x6f, 0x41, - 0x4e, 0xb0, 0x0b, 0x59, 0x5c, 0xe9, 0x36, 0x0f, 0xd4, 0xae, 0xd6, 0xeb, 0x0f, 0x3b, 0xbd, 0xe3, - 0x66, 0x17, 0x73, 0x17, 0xc9, 0xa8, 0xfa, 0xb3, 0x51, 0x87, 0xaa, 0x6d, 0xcc, 0x5f, 0x4c, 0xd6, - 0x57, 0x9b, 0x43, 0x94, 0x65, 0xea, 0xf7, 0x60, 0x77, 0x53, 0x9d, 0xd9, 0x74, 0x33, 0xea, 0x5f, - 0xa4, 0xe0, 0xd2, 0x86, 0x92, 0xb9, 0xf1, 0x16, 0xfd, 0x14, 0x72, 0x92, 0x69, 0xb2, 0x89, 0xbc, - 0xb3, 0xb1, 0xf6, 0x0a, 0xde, 0xad, 0x35, 0x12, 0x61, 0x17, 0x6f, 0xa4, 0x99, 0x73, 0x1a, 0x29, - 0x77, 0xb1, 0x46, 0xa7, 0x5f, 0xa7, 0xa0, 0x76, 0x9e, 0xef, 0x57, 0xdc, 0xf7, 0x74, 0xe2, 0xbe, - 0x7f, 0xb4, 0x1a, 0xc0, 0xcd, 0xf3, 0xf7, 0xb0, 0x16, 0xc5, 0x97, 0x29, 0xb8, 0xbc, 0x79, 0xde, - 0xd8, 0x18, 0xc3, 0xc7, 0x90, 0x9f, 0x31, 0xff, 0xd4, 0x09, 0x7b, 0xee, 0xdb, 0x1b, 0x2a, 0x39, - 0x57, 0xaf, 0xe6, 0x2a, 0xb0, 0x8a, 0xb7, 0x82, 0xcc, 0x79, 0x43, 0x83, 0x8c, 0x66, 0x2d, 0xd2, - 0xdf, 0xa4, 0xe1, 0xb5, 0x8d, 0xce, 0x37, 0x06, 0x7a, 0x1d, 0xc0, 0xb4, 0xe7, 0x0b, 0x5f, 0xf6, - 0x55, 0x59, 0x66, 0x8a, 0x42, 0x22, 0xae, 0x30, 0x2f, 0x21, 0x0b, 0x3f, 0xd2, 0x67, 0x84, 0x1e, - 0xa4, 0x48, 0x00, 0x1e, 0x2e, 0x03, 0xcd, 0x8a, 0x40, 0xdf, 0x38, 0x67, 0xa7, 0x6b, 0x2d, 0xeb, - 0x3d, 0x50, 0x0c, 0xcb, 0x64, 0xb6, 0xaf, 0x79, 0xbe, 0xcb, 0xf4, 0x99, 0x69, 0x4f, 0x45, 0x1d, - 0x2d, 0xec, 0xe7, 0x26, 0xba, 0xe5, 0x31, 0x5a, 0x95, 0xea, 0x41, 0xa8, 0xe5, 0x16, 0xa2, 0x59, - 0xb8, 0x31, 0x8b, 0x7c, 
0xc2, 0x42, 0xaa, 0x23, 0x8b, 0xfa, 0xdf, 0xb7, 0xa0, 0x14, 0x9b, 0xce, - 0xc8, 0x4d, 0x28, 0x3f, 0xd5, 0x9f, 0xeb, 0x5a, 0x38, 0x71, 0xcb, 0x4c, 0x94, 0xb8, 0xac, 0x1f, - 0x4c, 0xdd, 0xef, 0xc1, 0xae, 0x80, 0xe0, 0x1e, 0xf1, 0x41, 0x86, 0xa5, 0x7b, 0x9e, 0x48, 0x5a, - 0x41, 0x40, 0x09, 0xd7, 0xf5, 0xb8, 0xaa, 0x15, 0x6a, 0xc8, 0xfb, 0x70, 0x49, 0x58, 0xcc, 0xb0, - 0xf0, 0x9a, 0x73, 0x8b, 0x69, 0xfc, 0x1d, 0xc0, 0x13, 0xf5, 0x34, 0x8a, 0x6c, 0x87, 0x23, 0x8e, - 0x02, 0x00, 0x8f, 0xc8, 0x23, 0x87, 0x70, 0x5d, 0x98, 0x4d, 0x99, 0xcd, 0x5c, 0xdd, 0x67, 0x1a, - 0xfb, 0xe5, 0x02, 0xb1, 0x9a, 0x6e, 0x8f, 0xb5, 0x53, 0xdd, 0x3b, 0xad, 0xed, 0xc6, 0x1d, 0x5c, - 0xe5, 0xd8, 0xc3, 0x00, 0xaa, 0x0a, 0x64, 0xd3, 0x1e, 0x7f, 0x82, 0x38, 0xb2, 0x0f, 0x97, 0x85, - 0x23, 0x4c, 0x0a, 0xee, 0x59, 0x33, 0x4e, 0x99, 0xf1, 0x4c, 0x5b, 0xf8, 0x93, 0x87, 0xb5, 0xd7, - 0xe3, 0x1e, 0x44, 0x90, 0x03, 0x81, 0x69, 0x71, 0xc8, 0x08, 0x11, 0x64, 0x00, 0x65, 0x7e, 0x1e, - 0x33, 0xf3, 0x73, 0x0c, 0xdb, 0x71, 0x45, 0x8f, 0xa8, 0x6c, 0xb8, 0xdc, 0xb1, 0x24, 0x36, 0x7a, - 0x81, 0xc1, 0x11, 0xce, 0xa7, 0xfb, 0xb9, 0x41, 0x5f, 0x55, 0xdb, 0xb4, 0x14, 0x7a, 0x79, 0xe4, - 0xb8, 0x9c, 0x53, 0x53, 0x27, 0xca, 0x71, 0x49, 0x72, 0x6a, 0xea, 0x84, 0x19, 0xc6, 0x7c, 0x19, - 0x86, 0xdc, 0x36, 0xbe, 0xbb, 0x04, 0xc3, 0xba, 0x57, 0x53, 0x12, 0xf9, 0x32, 0x8c, 0x43, 0x09, - 0x08, 0x68, 0xee, 0xe1, 0x95, 0x78, 0x6d, 0x99, 0xaf, 0xb8, 0xe1, 0xce, 0xda, 0x2e, 0x57, 0x4d, - 0xf1, 0x89, 0xf3, 0xb3, 0x75, 0x43, 0x92, 0x78, 0xe2, 0xfc, 0x6c, 0xd5, 0xec, 0xb6, 0x78, 0x01, - 0x73, 0x99, 0x81, 0x29, 0x1f, 0xd7, 0xae, 0xc4, 0xd1, 0x31, 0x05, 0xb9, 0x8f, 0x44, 0x36, 0x34, - 0x66, 0xeb, 0x27, 0x78, 0xf6, 0xba, 0x8b, 0x3f, 0xbc, 0xda, 0x8d, 0x38, 0xb8, 0x62, 0x18, 0xaa, - 0xd0, 0x36, 0x85, 0x92, 0xdc, 0x83, 0x1d, 0xe7, 0xe4, 0xa9, 0x21, 0xc9, 0xa5, 0xa1, 0x9f, 0x89, - 0xf9, 0xb2, 0x76, 0x4b, 0xa4, 0xa9, 0xca, 0x15, 0x82, 0x5a, 0x7d, 0x21, 0x26, 0xef, 0xa0, 0x73, - 0xef, 0x54, 0x77, 0xe7, 0xa2, 0x49, 0x7b, 0x98, 0x54, 0x56, 0xbb, 0x2d, 0xa1, 0x52, 0x7e, 0x1c, - 0x8a, 0x89, 0x0a, 0x37, 0xf8, 0xe6, 0x6d, 0xdd, 0x76, 0xb4, 0x85, 0xc7, 0xb4, 0x65, 0x88, 0xd1, - 0x59, 0xbc, 0xcd, 0xc3, 0xa2, 0xd7, 0x42, 0xd8, 0xc8, 0xc3, 0x62, 0x16, 0x82, 0xc2, 0xe3, 0x79, - 0x02, 0xbb, 0x0b, 0xdb, 0xb4, 0x91, 0xe2, 0xa8, 0xe1, 0xc6, 0xf2, 0xc2, 0xd6, 0xfe, 0xb5, 0x75, - 0xce, 0xd0, 0x3d, 0x8a, 0xa3, 0x25, 0x49, 0xe8, 0xa5, 0xc5, 0xba, 0xb0, 0xbe, 0x0f, 0xe5, 0x38, - 0x77, 0x48, 0x11, 0x24, 0x7b, 0xb0, 0xbb, 0x61, 0x47, 0x6d, 0xf5, 0xda, 0xbc, 0x17, 0x7e, 0xa6, - 0x62, 0x63, 0xc3, 0x9e, 0xdc, 0xed, 0x0c, 0x55, 0x8d, 0x8e, 0x8e, 0x87, 0x9d, 0x23, 0x55, 0xc9, - 0xdc, 0x2b, 0x16, 0xfe, 0xbd, 0xa5, 0xfc, 0x0a, 0xff, 0xd2, 0xf5, 0xbf, 0xa6, 0xa1, 0x92, 0x9c, - 0x83, 0xc9, 0x4f, 0xe0, 0x4a, 0xf8, 0xd2, 0xea, 0x31, 0x5f, 0x7b, 0x61, 0xba, 0x82, 0xce, 0x33, - 0x5d, 0x4e, 0x92, 0xd1, 0x49, 0xec, 0x06, 0x28, 0x7c, 0xbd, 0xff, 0x05, 0x62, 0x1e, 0x09, 0x08, - 0xe9, 0xc2, 0x0d, 0x4c, 0x19, 0xce, 0x9a, 0xf6, 0x58, 0x77, 0xc7, 0xda, 0xf2, 0x73, 0x81, 0xa6, - 0x1b, 0xc8, 0x03, 0xcf, 0x91, 0x9d, 0x24, 0xf2, 0x72, 0xcd, 0x76, 0x06, 0x01, 0x78, 0x59, 0x62, - 0x9b, 0x01, 0x74, 0x85, 0x35, 0x99, 0xf3, 0x58, 0x83, 0xb3, 0xd7, 0x4c, 0x9f, 0x23, 0x6d, 0x7c, - 0xf7, 0x4c, 0x4c, 0x6f, 0x05, 0x5a, 0x40, 0x81, 0xca, 0xd7, 0xdf, 0xdc, 0x19, 0xc4, 0xf3, 0xf8, - 0x8f, 0x0c, 0x94, 0xe3, 0x13, 0x1c, 0x1f, 0x88, 0x0d, 0x51, 0xe6, 0x53, 0xa2, 0x0a, 0xbc, 0xf5, - 0x95, 0xf3, 0x5e, 0xa3, 0xc5, 0xeb, 0xff, 0x7e, 0x5e, 0xce, 0x55, 0x54, 0x5a, 0xf2, 0xde, 0xcb, - 0xb9, 0xc6, 0xe4, 0xb4, 0x5e, 0xa0, 0xc1, 0x0a, 
0x8b, 0x5d, 0xfe, 0xa9, 0x27, 0x7c, 0xe7, 0x85, - 0xef, 0x5b, 0x5f, 0xed, 0xfb, 0xf1, 0x40, 0x38, 0x2f, 0x3e, 0x1e, 0x68, 0xc7, 0x3d, 0x7a, 0xd4, - 0xec, 0xd2, 0xc0, 0x9c, 0x5c, 0x85, 0xac, 0xa5, 0x7f, 0x7e, 0x96, 0xec, 0x14, 0x42, 0x74, 0xd1, - 0xc4, 0xa3, 0x07, 0xfe, 0xc9, 0x23, 0x59, 0x9f, 0x85, 0xe8, 0x1b, 0xa4, 0xfe, 0x7d, 0xc8, 0x89, - 0x7c, 0x11, 0x80, 0x20, 0x63, 0xca, 0xf7, 0x48, 0x01, 0xb2, 0xad, 0x1e, 0xe5, 0xf4, 0x47, 0xbe, - 0x4b, 0xa9, 0xd6, 0xef, 0xa8, 0x2d, 0xbc, 0x01, 0xf5, 0xf7, 0x21, 0x2f, 0x93, 0xc0, 0xaf, 0x46, - 0x94, 0x06, 0x34, 0x92, 0xcb, 0xc0, 0x47, 0x2a, 0xd4, 0x8e, 0x8e, 0x0e, 0x54, 0xaa, 0xa4, 0xe3, - 0xc7, 0xfb, 0x97, 0x14, 0x94, 0x62, 0x03, 0x15, 0x6f, 0xe5, 0xba, 0x65, 0x39, 0x2f, 0x34, 0xdd, - 0x32, 0xb1, 0x42, 0xc9, 0xf3, 0x01, 0x21, 0x6a, 0x72, 0xc9, 0x45, 0xf3, 0xf7, 0x7f, 0xe1, 0xe6, - 0x1f, 0x53, 0xa0, 0xac, 0x0e, 0x63, 0x2b, 0x01, 0xa6, 0xbe, 0xd5, 0x00, 0xff, 0x90, 0x82, 0x4a, - 0x72, 0x02, 0x5b, 0x09, 0xef, 0xe6, 0xb7, 0x1a, 0xde, 0xef, 0x53, 0xb0, 0x9d, 0x98, 0xbb, 0xbe, - 0x53, 0xd1, 0xfd, 0x2e, 0x03, 0x97, 0x36, 0xd8, 0x61, 0x01, 0x92, 0x03, 0xaa, 0x9c, 0x99, 0x7f, - 0x70, 0x91, 0x67, 0x35, 0x78, 0xff, 0xeb, 0xeb, 0xae, 0x1f, 0xcc, 0xb3, 0xd8, 0x2f, 0xcd, 0x31, - 0x16, 0x55, 0x73, 0x62, 0xe2, 0xf8, 0x26, 0xdf, 0x58, 0xe4, 0xd4, 0x5a, 0x5d, 0xca, 0xe5, 0xeb, - 0xf1, 0xf7, 0x81, 0xcc, 0x1d, 0xcf, 0xf4, 0xcd, 0xe7, 0xfc, 0xf3, 0x5c, 0xf8, 0x22, 0xcd, 0xa7, - 0xd8, 0x2c, 0x55, 0x42, 0x4d, 0xc7, 0xf6, 0x23, 0xb4, 0xcd, 0xa6, 0xfa, 0x0a, 0x9a, 0x97, 0xa1, - 0x0c, 0x55, 0x42, 0x4d, 0x84, 0xc6, 0x41, 0x73, 0xec, 0x2c, 0xf8, 0x40, 0x20, 0x71, 0xbc, 0xea, - 0xa5, 0x68, 0x49, 0xca, 0x22, 0x48, 0x30, 0xb1, 0x2d, 0xdf, 0xe0, 0xcb, 0xb4, 0x24, 0x65, 0x12, - 0x72, 0x07, 0xaa, 0xfa, 0x74, 0xea, 0x72, 0xe7, 0xa1, 0x23, 0x39, 0x86, 0x56, 0x22, 0xb1, 0x00, - 0xee, 0x3d, 0x86, 0x42, 0x98, 0x07, 0xde, 0x58, 0x78, 0x26, 0xb0, 0xe7, 0x8b, 0xef, 0x28, 0x69, - 0xfe, 0x52, 0x6f, 0x87, 0x4a, 0x7c, 0xa8, 0xe9, 0x69, 0xcb, 0x0f, 0x7a, 0x69, 0xd4, 0x17, 0x68, - 0xc9, 0xf4, 0xa2, 0x2f, 0x38, 0xf5, 0x2f, 0xb1, 0xbd, 0x26, 0x3f, 0x48, 0x92, 0x36, 0x14, 0x2c, - 0x07, 0xf9, 0xc1, 0x2d, 0xe4, 0xd7, 0xf0, 0xbb, 0xaf, 0xf8, 0x86, 0xd9, 0xe8, 0x06, 0x78, 0x1a, - 0x59, 0xee, 0xfd, 0x2d, 0x05, 0x85, 0x50, 0x8c, 0x8d, 0x22, 0x3b, 0xd7, 0xfd, 0x53, 0xe1, 0x2e, - 0x77, 0x90, 0x56, 0x52, 0x54, 0xac, 0xb9, 0x1c, 0xa7, 0x19, 0x5b, 0x50, 0x20, 0x90, 0xf3, 0x35, - 0x3f, 0x57, 0x8b, 0xe9, 0x63, 0x31, 0xe0, 0x3a, 0xb3, 0x19, 0x9e, 0xa4, 0x17, 0x9e, 0x6b, 0x20, - 0x6f, 0x05, 0x62, 0xfe, 0x5d, 0xdc, 0x77, 0x75, 0xd3, 0x4a, 0x60, 0xb3, 0x02, 0xab, 0x84, 0x8a, - 0x08, 0xbc, 0x0f, 0x57, 0x43, 0xbf, 0x63, 0xe6, 0xeb, 0x38, 0x3c, 0x8f, 0x97, 0x46, 0x79, 0xf1, - 0xb5, 0xeb, 0x4a, 0x00, 0x68, 0x07, 0xfa, 0xd0, 0xf6, 0xe0, 0x09, 0x0e, 0xb2, 0xce, 0x6c, 0x35, - 0x13, 0x07, 0xca, 0xca, 0x7b, 0x97, 0xf7, 0x49, 0xea, 0x33, 0x58, 0x0e, 0x15, 0x5f, 0xa4, 0x33, - 0x87, 0xfd, 0x83, 0x3f, 0xa5, 0xf7, 0x0e, 0xa5, 0x5d, 0x3f, 0xcc, 0x20, 0x65, 0x13, 0x8b, 0x19, - 0x3c, 0x3b, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3, 0x18, 0x00, 0x00, + // 2451 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xce, 0x66, 0xe3, 0x28, 0xc9, + 0xc6, 0x49, 0x5a, 0x65, 0xe1, 0x7c, 0xae, 0xb7, 0xd8, 0x56, 0x96, 0x18, 0xaf, 0x52, 0x59, 0x52, + 0x29, 0xb9, 0x9b, 0xec, 0x85, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, 0x83, + 0x1e, 0x02, 0xf4, 0xd4, 
0xff, 0xa0, 0x28, 0x8a, 0x1e, 0xf6, 0xb2, 0x40, 0xaf, 0x05, 0x0a, 0xb4, + 0xf7, 0x5e, 0x0b, 0xf4, 0xde, 0x43, 0x0f, 0x05, 0xda, 0x3f, 0xa3, 0x98, 0x19, 0x92, 0xa2, 0xbe, + 0x12, 0x77, 0x81, 0x64, 0x4f, 0xf6, 0xfc, 0xde, 0xef, 0xbd, 0x79, 0xf3, 0xf8, 0x66, 0xde, 0x9b, + 0x11, 0x20, 0x9d, 0x7a, 0x9a, 0x6b, 0x38, 0xbe, 0xed, 0x56, 0x1c, 0xd7, 0xf6, 0x6d, 0xbc, 0x36, + 0xb0, 0xed, 0x81, 0x49, 0xc5, 0xe8, 0x78, 0xdc, 0x2f, 0x1f, 0xc2, 0xfa, 0x43, 0xc3, 0xa4, 0xf5, + 0x88, 0xd8, 0xa5, 0x3e, 0x7e, 0x00, 0xe9, 0xbe, 0x61, 0x52, 0x29, 0xb1, 0x9d, 0xda, 0x29, 0xec, + 0x5e, 0xa9, 0xcc, 0x28, 0x55, 0xa6, 0x35, 0x3a, 0x0c, 0x56, 0xb8, 0x46, 0xf9, 0xdf, 0x69, 0xd8, + 0x58, 0x20, 0xc5, 0x18, 0xd2, 0x16, 0x19, 0x31, 0x8b, 0x89, 0x9d, 0xbc, 0xc2, 0xff, 0xc7, 0x12, + 0xac, 0x38, 0x44, 0x7b, 0x46, 0x06, 0x54, 0x4a, 0x72, 0x38, 0x1c, 0xe2, 0x8f, 0x01, 0x74, 0xea, + 0x50, 0x4b, 0xa7, 0x96, 0x76, 0x22, 0xa5, 0xb6, 0x53, 0x3b, 0x79, 0x25, 0x86, 0xe0, 0x9b, 0xb0, + 0xee, 0x8c, 0x8f, 0x4d, 0x43, 0x53, 0x63, 0x34, 0xd8, 0x4e, 0xed, 0x64, 0x14, 0x24, 0x04, 0xf5, + 0x09, 0xf9, 0x1a, 0xac, 0xbd, 0xa0, 0xe4, 0x59, 0x9c, 0x5a, 0xe0, 0xd4, 0x12, 0x83, 0x63, 0xc4, + 0x1a, 0x14, 0x47, 0xd4, 0xf3, 0xc8, 0x80, 0xaa, 0xfe, 0x89, 0x43, 0xa5, 0x34, 0x5f, 0xfd, 0xf6, + 0xdc, 0xea, 0x67, 0x57, 0x5e, 0x08, 0xb4, 0x7a, 0x27, 0x0e, 0xc5, 0x55, 0xc8, 0x53, 0x6b, 0x3c, + 0x12, 0x16, 0x32, 0x4b, 0xe2, 0x27, 0x5b, 0xe3, 0xd1, 0xac, 0x95, 0x1c, 0x53, 0x0b, 0x4c, 0xac, + 0x78, 0xd4, 0x7d, 0x6e, 0x68, 0x54, 0xca, 0x72, 0x03, 0xd7, 0xe6, 0x0c, 0x74, 0x85, 0x7c, 0xd6, + 0x46, 0xa8, 0x87, 0x6b, 0x90, 0xa7, 0x2f, 0x7d, 0x6a, 0x79, 0x86, 0x6d, 0x49, 0x2b, 0xdc, 0xc8, + 0xd5, 0x05, 0x5f, 0x91, 0x9a, 0xfa, 0xac, 0x89, 0x89, 0x1e, 0xbe, 0x07, 0x2b, 0xb6, 0xe3, 0x1b, + 0xb6, 0xe5, 0x49, 0xb9, 0xed, 0xc4, 0x4e, 0x61, 0xf7, 0xa3, 0x85, 0x89, 0xd0, 0x16, 0x1c, 0x25, + 0x24, 0xe3, 0x06, 0x20, 0xcf, 0x1e, 0xbb, 0x1a, 0x55, 0x35, 0x5b, 0xa7, 0xaa, 0x61, 0xf5, 0x6d, + 0x29, 0xcf, 0x0d, 0x5c, 0x9c, 0x5f, 0x08, 0x27, 0xd6, 0x6c, 0x9d, 0x36, 0xac, 0xbe, 0xad, 0x94, + 0xbc, 0xa9, 0x31, 0x3e, 0x03, 0x59, 0xef, 0xc4, 0xf2, 0xc9, 0x4b, 0xa9, 0xc8, 0x33, 0x24, 0x18, + 0x95, 0xff, 0x92, 0x85, 0xb5, 0xd3, 0xa4, 0xd8, 0xe7, 0x90, 0xe9, 0xb3, 0x55, 0x4a, 0xc9, 0xff, + 0x27, 0x06, 0x42, 0x67, 0x3a, 0x88, 0xd9, 0xef, 0x19, 0xc4, 0x2a, 0x14, 0x2c, 0xea, 0xf9, 0x54, + 0x17, 0x19, 0x91, 0x3a, 0x65, 0x4e, 0x81, 0x50, 0x9a, 0x4f, 0xa9, 0xf4, 0xf7, 0x4a, 0xa9, 0xc7, + 0xb0, 0x16, 0xb9, 0xa4, 0xba, 0xc4, 0x1a, 0x84, 0xb9, 0x79, 0xeb, 0x6d, 0x9e, 0x54, 0xe4, 0x50, + 0x4f, 0x61, 0x6a, 0x4a, 0x89, 0x4e, 0x8d, 0x71, 0x1d, 0xc0, 0xb6, 0xa8, 0xdd, 0x57, 0x75, 0xaa, + 0x99, 0x52, 0x6e, 0x49, 0x94, 0xda, 0x8c, 0x32, 0x17, 0x25, 0x5b, 0xa0, 0x9a, 0x89, 0x3f, 0x9b, + 0xa4, 0xda, 0xca, 0x92, 0x4c, 0x39, 0x14, 0x9b, 0x6c, 0x2e, 0xdb, 0x8e, 0xa0, 0xe4, 0x52, 0x96, + 0xf7, 0x54, 0x0f, 0x56, 0x96, 0xe7, 0x4e, 0x54, 0xde, 0xba, 0x32, 0x25, 0x50, 0x13, 0x0b, 0x5b, + 0x75, 0xe3, 0x43, 0x7c, 0x19, 0x22, 0x40, 0xe5, 0x69, 0x05, 0xfc, 0x14, 0x2a, 0x86, 0x60, 0x8b, + 0x8c, 0xe8, 0xd6, 0x2b, 0x28, 0x4d, 0x87, 0x07, 0x6f, 0x42, 0xc6, 0xf3, 0x89, 0xeb, 0xf3, 0x2c, + 0xcc, 0x28, 0x62, 0x80, 0x11, 0xa4, 0xa8, 0xa5, 0xf3, 0x53, 0x2e, 0xa3, 0xb0, 0x7f, 0xf1, 0xcf, + 0x26, 0x0b, 0x4e, 0xf1, 0x05, 0x7f, 0x32, 0xff, 0x45, 0xa7, 0x2c, 0xcf, 0xae, 0x7b, 0xeb, 0x3e, + 0xac, 0x4e, 0x2d, 0xe0, 0xb4, 0x53, 0x97, 0x7f, 0x05, 0x1f, 0x2e, 0x34, 0x8d, 0x1f, 0xc3, 0xe6, + 0xd8, 0x32, 0x2c, 0x9f, 0xba, 0x8e, 0x4b, 0x59, 0xc6, 0x8a, 0xa9, 0xa4, 0xff, 0xac, 0x2c, 0xc9, + 0xb9, 0xa3, 0x38, 0x5b, 0x58, 0x51, 0x36, 0xc6, 
0xf3, 0xe0, 0x8d, 0x7c, 0xee, 0xbf, 0x2b, 0xe8, + 0xf5, 0xeb, 0xd7, 0xaf, 0x93, 0xe5, 0xdf, 0x66, 0x61, 0x73, 0xd1, 0x9e, 0x59, 0xb8, 0x7d, 0xcf, + 0x40, 0xd6, 0x1a, 0x8f, 0x8e, 0xa9, 0xcb, 0x83, 0x94, 0x51, 0x82, 0x11, 0xae, 0x42, 0xc6, 0x24, + 0xc7, 0xd4, 0x94, 0xd2, 0xdb, 0x89, 0x9d, 0xd2, 0xee, 0xcd, 0x53, 0xed, 0xca, 0x4a, 0x93, 0xa9, + 0x28, 0x42, 0x13, 0x7f, 0x01, 0xe9, 0xe0, 0x88, 0x66, 0x16, 0x6e, 0x9c, 0xce, 0x02, 0xdb, 0x4b, + 0x0a, 0xd7, 0xc3, 0xe7, 0x21, 0xcf, 0xfe, 0x8a, 0xdc, 0xc8, 0x72, 0x9f, 0x73, 0x0c, 0x60, 0x79, + 0x81, 0xb7, 0x20, 0xc7, 0xb7, 0x89, 0x4e, 0xc3, 0xd2, 0x16, 0x8d, 0x59, 0x62, 0xe9, 0xb4, 0x4f, + 0xc6, 0xa6, 0xaf, 0x3e, 0x27, 0xe6, 0x98, 0xf2, 0x84, 0xcf, 0x2b, 0xc5, 0x00, 0xfc, 0x25, 0xc3, + 0xf0, 0x45, 0x28, 0x88, 0x5d, 0x65, 0x58, 0x3a, 0x7d, 0xc9, 0x4f, 0xcf, 0x8c, 0x22, 0x36, 0x5a, + 0x83, 0x21, 0x6c, 0xfa, 0xa7, 0x9e, 0x6d, 0x85, 0xa9, 0xc9, 0xa7, 0x60, 0x00, 0x9f, 0xfe, 0xfe, + 0xec, 0xc1, 0x7d, 0x61, 0xf1, 0xf2, 0x66, 0x73, 0xaa, 0xfc, 0xe7, 0x24, 0xa4, 0xf9, 0x79, 0xb1, + 0x06, 0x85, 0xde, 0x93, 0x8e, 0xac, 0xd6, 0xdb, 0x47, 0xfb, 0x4d, 0x19, 0x25, 0x70, 0x09, 0x80, + 0x03, 0x0f, 0x9b, 0xed, 0x6a, 0x0f, 0x25, 0xa3, 0x71, 0xa3, 0xd5, 0xbb, 0x77, 0x07, 0xa5, 0x22, + 0x85, 0x23, 0x01, 0xa4, 0xe3, 0x84, 0xdb, 0xbb, 0x28, 0x83, 0x11, 0x14, 0x85, 0x81, 0xc6, 0x63, + 0xb9, 0x7e, 0xef, 0x0e, 0xca, 0x4e, 0x23, 0xb7, 0x77, 0xd1, 0x0a, 0x5e, 0x85, 0x3c, 0x47, 0xf6, + 0xdb, 0xed, 0x26, 0xca, 0x45, 0x36, 0xbb, 0x3d, 0xa5, 0xd1, 0x3a, 0x40, 0xf9, 0xc8, 0xe6, 0x81, + 0xd2, 0x3e, 0xea, 0x20, 0x88, 0x2c, 0x1c, 0xca, 0xdd, 0x6e, 0xf5, 0x40, 0x46, 0x85, 0x88, 0xb1, + 0xff, 0xa4, 0x27, 0x77, 0x51, 0x71, 0xca, 0xad, 0xdb, 0xbb, 0x68, 0x35, 0x9a, 0x42, 0x6e, 0x1d, + 0x1d, 0xa2, 0x12, 0x5e, 0x87, 0x55, 0x31, 0x45, 0xe8, 0xc4, 0xda, 0x0c, 0x74, 0xef, 0x0e, 0x42, + 0x13, 0x47, 0x84, 0x95, 0xf5, 0x29, 0xe0, 0xde, 0x1d, 0x84, 0xcb, 0x35, 0xc8, 0xf0, 0xec, 0xc2, + 0x18, 0x4a, 0xcd, 0xea, 0xbe, 0xdc, 0x54, 0xdb, 0x9d, 0x5e, 0xa3, 0xdd, 0xaa, 0x36, 0x51, 0x62, + 0x82, 0x29, 0xf2, 0x2f, 0x8e, 0x1a, 0x8a, 0x5c, 0x47, 0xc9, 0x38, 0xd6, 0x91, 0xab, 0x3d, 0xb9, + 0x8e, 0x52, 0x65, 0x0d, 0x36, 0x17, 0x9d, 0x93, 0x0b, 0x77, 0x46, 0xec, 0x13, 0x27, 0x97, 0x7c, + 0x62, 0x6e, 0x6b, 0xee, 0x13, 0x7f, 0x9b, 0x80, 0x8d, 0x05, 0xb5, 0x62, 0xe1, 0x24, 0x3f, 0x85, + 0x8c, 0x48, 0x51, 0x51, 0x3d, 0xaf, 0x2f, 0x2c, 0x3a, 0x3c, 0x61, 0xe7, 0x2a, 0x28, 0xd7, 0x8b, + 0x77, 0x10, 0xa9, 0x25, 0x1d, 0x04, 0x33, 0x31, 0xe7, 0xe4, 0xaf, 0x13, 0x20, 0x2d, 0xb3, 0xfd, + 0x96, 0x83, 0x22, 0x39, 0x75, 0x50, 0x7c, 0x3e, 0xeb, 0xc0, 0xa5, 0xe5, 0x6b, 0x98, 0xf3, 0xe2, + 0xbb, 0x04, 0x9c, 0x59, 0xdc, 0x68, 0x2d, 0xf4, 0xe1, 0x0b, 0xc8, 0x8e, 0xa8, 0x3f, 0xb4, 0xc3, + 0x66, 0xe3, 0x93, 0x05, 0x25, 0x8c, 0x89, 0x67, 0x63, 0x15, 0x68, 0xc5, 0x6b, 0x60, 0x6a, 0x59, + 0xb7, 0x24, 0xbc, 0x99, 0xf3, 0xf4, 0x37, 0x49, 0xf8, 0x70, 0xa1, 0xf1, 0x85, 0x8e, 0x5e, 0x00, + 0x30, 0x2c, 0x67, 0xec, 0x8b, 0x86, 0x42, 0x9c, 0x4f, 0x79, 0x8e, 0xf0, 0xbd, 0xcf, 0xce, 0x9e, + 0xb1, 0x1f, 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x07, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0x8f, + 0x97, 0xac, 0x74, 0xae, 0x56, 0x7f, 0x0a, 0x48, 0x33, 0x0d, 0x6a, 0xf9, 0xaa, 0xe7, 0xbb, 0x94, + 0x8c, 0x0c, 0x6b, 0xc0, 0x0f, 0xe0, 0xdc, 0x5e, 0xa6, 0x4f, 0x4c, 0x8f, 0x2a, 0x6b, 0x42, 0xdc, + 0x0d, 0xa5, 0x4c, 0x83, 0xd7, 0x38, 0x37, 0xa6, 0x91, 0x9d, 0xd2, 0x10, 0xe2, 0x48, 0xa3, 0xfc, + 0xa7, 0x1c, 0x14, 0x62, 0x6d, 0x29, 0xbe, 0x04, 0xc5, 0xa7, 0xe4, 0x39, 0x51, 0xc3, 0xab, 0x86, + 0x88, 0x44, 0x81, 0x61, 0x9d, 0xe0, 0xba, 0xf1, 0x29, 0x6c, 0x72, 0x8a, 
0x3d, 0xf6, 0xa9, 0xab, + 0x6a, 0x26, 0xf1, 0x3c, 0x1e, 0xb4, 0x1c, 0xa7, 0x62, 0x26, 0x6b, 0x33, 0x51, 0x2d, 0x94, 0xe0, + 0xbb, 0xb0, 0xc1, 0x35, 0x46, 0x63, 0xd3, 0x37, 0x1c, 0x93, 0xaa, 0xec, 0xf2, 0xe3, 0xf1, 0x83, + 0x38, 0xf2, 0x6c, 0x9d, 0x31, 0x0e, 0x03, 0x02, 0xf3, 0xc8, 0xc3, 0x75, 0xb8, 0xc0, 0xd5, 0x06, + 0xd4, 0xa2, 0x2e, 0xf1, 0xa9, 0x4a, 0xbf, 0x19, 0x13, 0xd3, 0x53, 0x89, 0xa5, 0xab, 0x43, 0xe2, + 0x0d, 0xa5, 0x4d, 0x66, 0x60, 0x3f, 0x29, 0x25, 0x94, 0x73, 0x8c, 0x78, 0x10, 0xf0, 0x64, 0x4e, + 0xab, 0x5a, 0xfa, 0x97, 0xc4, 0x1b, 0xe2, 0x3d, 0x38, 0xc3, 0xad, 0x78, 0xbe, 0x6b, 0x58, 0x03, + 0x55, 0x1b, 0x52, 0xed, 0x99, 0x3a, 0xf6, 0xfb, 0x0f, 0xa4, 0xf3, 0xf1, 0xf9, 0xb9, 0x87, 0x5d, + 0xce, 0xa9, 0x31, 0xca, 0x91, 0xdf, 0x7f, 0x80, 0xbb, 0x50, 0x64, 0x1f, 0x63, 0x64, 0xbc, 0xa2, + 0x6a, 0xdf, 0x76, 0x79, 0x65, 0x29, 0x2d, 0xd8, 0xd9, 0xb1, 0x08, 0x56, 0xda, 0x81, 0xc2, 0xa1, + 0xad, 0xd3, 0xbd, 0x4c, 0xb7, 0x23, 0xcb, 0x75, 0xa5, 0x10, 0x5a, 0x79, 0x68, 0xbb, 0x2c, 0xa1, + 0x06, 0x76, 0x14, 0xe0, 0x82, 0x48, 0xa8, 0x81, 0x1d, 0x86, 0xf7, 0x2e, 0x6c, 0x68, 0x9a, 0x58, + 0xb3, 0xa1, 0xa9, 0xc1, 0x15, 0xc5, 0x93, 0xd0, 0x54, 0xb0, 0x34, 0xed, 0x40, 0x10, 0x82, 0x1c, + 0xf7, 0xf0, 0x67, 0xf0, 0xe1, 0x24, 0x58, 0x71, 0xc5, 0xf5, 0xb9, 0x55, 0xce, 0xaa, 0xde, 0x85, + 0x0d, 0xe7, 0x64, 0x5e, 0x11, 0x4f, 0xcd, 0xe8, 0x9c, 0xcc, 0xaa, 0xdd, 0x87, 0x4d, 0x67, 0xe8, + 0xcc, 0xeb, 0x6d, 0xc4, 0xf5, 0xb0, 0x33, 0x74, 0x66, 0x15, 0xaf, 0xf2, 0xfb, 0xaa, 0x4b, 0x35, + 0xe2, 0x53, 0x5d, 0x3a, 0x1b, 0xa7, 0xc7, 0x04, 0xf8, 0x16, 0x20, 0x4d, 0x53, 0xa9, 0x45, 0x8e, + 0x4d, 0xaa, 0x12, 0x97, 0x5a, 0xc4, 0x93, 0x2e, 0xc6, 0xc9, 0x25, 0x4d, 0x93, 0xb9, 0xb4, 0xca, + 0x85, 0xf8, 0x06, 0xac, 0xdb, 0xc7, 0x4f, 0x35, 0x91, 0x92, 0xaa, 0xe3, 0xd2, 0xbe, 0xf1, 0x52, + 0xba, 0xc2, 0xe3, 0xbb, 0xc6, 0x04, 0x3c, 0x21, 0x3b, 0x1c, 0xc6, 0xd7, 0x01, 0x69, 0xde, 0x90, + 0xb8, 0x0e, 0xef, 0x09, 0x3c, 0x87, 0x68, 0x54, 0xba, 0x2a, 0xa8, 0x02, 0x6f, 0x85, 0x30, 0xdb, + 0x12, 0xde, 0x0b, 0xa3, 0xef, 0x87, 0x16, 0xaf, 0x89, 0x2d, 0xc1, 0xb1, 0xc0, 0xda, 0x0e, 0x20, + 0x16, 0x8a, 0xa9, 0x89, 0x77, 0x38, 0xad, 0xe4, 0x0c, 0x9d, 0xf8, 0xbc, 0x97, 0x61, 0x95, 0x31, + 0x27, 0x93, 0x5e, 0x17, 0xfd, 0x8c, 0x33, 0x8c, 0xcd, 0xf8, 0xce, 0x5a, 0xcb, 0xf2, 0x1e, 0x14, + 0xe3, 0xf9, 0x89, 0xf3, 0x20, 0x32, 0x14, 0x25, 0x58, 0xad, 0xaf, 0xb5, 0xeb, 0xac, 0x4a, 0x7f, + 0x2d, 0xa3, 0x24, 0xeb, 0x16, 0x9a, 0x8d, 0x9e, 0xac, 0x2a, 0x47, 0xad, 0x5e, 0xe3, 0x50, 0x46, + 0xa9, 0x78, 0x5b, 0xfa, 0xb7, 0x24, 0x94, 0xa6, 0x6f, 0x18, 0xf8, 0x27, 0x70, 0x36, 0x7c, 0x0e, + 0xf0, 0xa8, 0xaf, 0xbe, 0x30, 0x5c, 0xbe, 0x65, 0x46, 0x44, 0x74, 0xd8, 0xd1, 0x47, 0xdb, 0x0c, + 0x58, 0x5d, 0xea, 0x7f, 0x65, 0xb8, 0x6c, 0x43, 0x8c, 0x88, 0x8f, 0x9b, 0x70, 0xd1, 0xb2, 0x55, + 0xcf, 0x27, 0x96, 0x4e, 0x5c, 0x5d, 0x9d, 0x3c, 0xc4, 0xa8, 0x44, 0xd3, 0xa8, 0xe7, 0xd9, 0xa2, + 0x54, 0x45, 0x56, 0x3e, 0xb2, 0xec, 0x6e, 0x40, 0x9e, 0x9c, 0xe1, 0xd5, 0x80, 0x3a, 0x93, 0x60, + 0xa9, 0x65, 0x09, 0x76, 0x1e, 0xf2, 0x23, 0xe2, 0xa8, 0xd4, 0xf2, 0xdd, 0x13, 0xde, 0x57, 0xe6, + 0x94, 0xdc, 0x88, 0x38, 0x32, 0x1b, 0xbf, 0x9f, 0xf6, 0xfe, 0x9f, 0x29, 0x28, 0xc6, 0x7b, 0x4b, + 0xd6, 0xaa, 0x6b, 0xbc, 0x8e, 0x24, 0xf8, 0x49, 0x73, 0xf9, 0x8d, 0x9d, 0x68, 0xa5, 0xc6, 0x0a, + 0xcc, 0x5e, 0x56, 0x74, 0x7c, 0x8a, 0xd0, 0x64, 0xc5, 0x9d, 0x9d, 0x2d, 0x54, 0xdc, 0x62, 0x72, + 0x4a, 0x30, 0xc2, 0x07, 0x90, 0x7d, 0xea, 0x71, 0xdb, 0x59, 0x6e, 0xfb, 0xca, 0x9b, 0x6d, 0x3f, + 0xea, 0x72, 0xe3, 0xf9, 0x47, 0x5d, 0xb5, 0xd5, 0x56, 0x0e, 0xab, 0x4d, 0x25, 0x50, 0xc7, 0xe7, + 
0x20, 0x6d, 0x92, 0x57, 0x27, 0xd3, 0xa5, 0x88, 0x43, 0xa7, 0x0d, 0xfc, 0x39, 0x48, 0xbf, 0xa0, + 0xe4, 0xd9, 0x74, 0x01, 0xe0, 0xd0, 0x3b, 0x4c, 0xfd, 0x5b, 0x90, 0xe1, 0xf1, 0xc2, 0x00, 0x41, + 0xc4, 0xd0, 0x07, 0x38, 0x07, 0xe9, 0x5a, 0x5b, 0x61, 0xe9, 0x8f, 0xa0, 0x28, 0x50, 0xb5, 0xd3, + 0x90, 0x6b, 0x32, 0x4a, 0x96, 0xef, 0x42, 0x56, 0x04, 0x81, 0x6d, 0x8d, 0x28, 0x0c, 0xe8, 0x83, + 0x60, 0x18, 0xd8, 0x48, 0x84, 0xd2, 0xa3, 0xc3, 0x7d, 0x59, 0x41, 0xc9, 0xf8, 0xe7, 0xf5, 0xa0, + 0x18, 0x6f, 0x2b, 0xdf, 0x4f, 0x4e, 0xfd, 0x35, 0x01, 0x85, 0x58, 0x9b, 0xc8, 0x1a, 0x14, 0x62, + 0x9a, 0xf6, 0x0b, 0x95, 0x98, 0x06, 0xf1, 0x82, 0xa4, 0x00, 0x0e, 0x55, 0x19, 0x72, 0xda, 0x8f, + 0xf6, 0x5e, 0x9c, 0xff, 0x43, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x07, 0x75, 0xf0, + 0xf7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x41, 0xdd, 0xfb, 0x57, 0x12, 0x56, + 0xa7, 0xba, 0xc9, 0xd3, 0x7a, 0xf7, 0x0d, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 0x2d, 0xed, + 0x44, 0x35, 0xe9, 0x73, 0x6a, 0x4a, 0x65, 0x7e, 0x50, 0xdc, 0x7a, 0x73, 0xbf, 0x5a, 0x69, 0x4c, + 0xf4, 0x9a, 0x4c, 0x6d, 0x6f, 0xa3, 0x51, 0x97, 0x0f, 0x3b, 0xed, 0x9e, 0xdc, 0xaa, 0x3d, 0x51, + 0x8f, 0x5a, 0x3f, 0x6f, 0xb5, 0xbf, 0x6a, 0x29, 0xc8, 0x98, 0xa1, 0xbd, 0xc3, 0xad, 0xde, 0x01, + 0x34, 0xeb, 0x14, 0x3e, 0x0b, 0x8b, 0xdc, 0x42, 0x1f, 0xe0, 0x0d, 0x58, 0x6b, 0xb5, 0xd5, 0x6e, + 0xa3, 0x2e, 0xab, 0xf2, 0xc3, 0x87, 0x72, 0xad, 0xd7, 0x15, 0x17, 0xf8, 0x88, 0xdd, 0x9b, 0xde, + 0xd4, 0xbf, 0x4b, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0x8f, 0x4f, + 0xe3, 0x7d, 0x85, 0x95, 0xfc, 0x0e, 0x71, 0xfd, 0xe0, 0xaa, 0x71, 0x1d, 0x58, 0x94, 0x2c, 0xdf, + 0xe8, 0x1b, 0xd4, 0x0d, 0xde, 0x3b, 0xc4, 0x85, 0x62, 0x6d, 0x82, 0x8b, 0x27, 0x8f, 0x1f, 0x01, + 0x76, 0x6c, 0xcf, 0xf0, 0x8d, 0xe7, 0x54, 0x35, 0xac, 0xf0, 0x71, 0x84, 0x5d, 0x30, 0xd2, 0x0a, + 0x0a, 0x25, 0x0d, 0xcb, 0x8f, 0xd8, 0x16, 0x1d, 0x90, 0x19, 0x36, 0x3b, 0xc0, 0x53, 0x0a, 0x0a, + 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xf5, 0x22, 0xa1, 0x14, + 0x04, 0x16, 0x51, 0x82, 0x7e, 0x7a, 0xf2, 0x2a, 0x53, 0x54, 0x0a, 0x02, 0x13, 0x94, 0x6b, 0xb0, + 0x46, 0x06, 0x03, 0x97, 0x19, 0x0f, 0x0d, 0x89, 0x1b, 0x42, 0x29, 0x82, 0x39, 0x71, 0xeb, 0x11, + 0xe4, 0xc2, 0x38, 0xb0, 0x92, 0xcc, 0x22, 0xa1, 0x3a, 0xe2, 0x65, 0x2e, 0xb9, 0x93, 0x57, 0x72, + 0x56, 0x28, 0xbc, 0x04, 0x45, 0xc3, 0x53, 0x27, 0x8f, 0xcc, 0xc9, 0xed, 0xe4, 0x4e, 0x4e, 0x29, + 0x18, 0x5e, 0xf4, 0x40, 0x57, 0xfe, 0x2e, 0x09, 0xa5, 0xe9, 0x47, 0x72, 0x5c, 0x87, 0x9c, 0x69, + 0x6b, 0x84, 0xa7, 0x96, 0xf8, 0x85, 0x66, 0xe7, 0x2d, 0xef, 0xea, 0x95, 0x66, 0xc0, 0x57, 0x22, + 0xcd, 0xad, 0xbf, 0x27, 0x20, 0x17, 0xc2, 0xf8, 0x0c, 0xa4, 0x1d, 0xe2, 0x0f, 0xb9, 0xb9, 0xcc, + 0x7e, 0x12, 0x25, 0x14, 0x3e, 0x66, 0xb8, 0xe7, 0x10, 0x8b, 0xa7, 0x40, 0x80, 0xb3, 0x31, 0xfb, + 0xae, 0x26, 0x25, 0x3a, 0xbf, 0x7e, 0xd8, 0xa3, 0x11, 0xb5, 0x7c, 0x2f, 0xfc, 0xae, 0x01, 0x5e, + 0x0b, 0x60, 0x7c, 0x13, 0xd6, 0x7d, 0x97, 0x18, 0xe6, 0x14, 0x37, 0xcd, 0xb9, 0x28, 0x14, 0x44, + 0xe4, 0x3d, 0x38, 0x17, 0xda, 0xd5, 0xa9, 0x4f, 0xb4, 0x21, 0xd5, 0x27, 0x4a, 0x59, 0xfe, 0x02, + 0x7b, 0x36, 0x20, 0xd4, 0x03, 0x79, 0xa8, 0x5b, 0xfe, 0x47, 0x02, 0xd6, 0xc3, 0x0b, 0x93, 0x1e, + 0x05, 0xeb, 0x10, 0x80, 0x58, 0x96, 0xed, 0xc7, 0xc3, 0x35, 0x9f, 0xca, 0x73, 0x7a, 0x95, 0x6a, + 0xa4, 0xa4, 0xc4, 0x0c, 0x6c, 0x8d, 0x00, 0x26, 0x92, 0xa5, 0x61, 0xbb, 0x08, 0x85, 0xe0, 0x17, + 0x10, 0xfe, 0x33, 0x9a, 0xb8, 0x62, 0x83, 0x80, 0xd8, 0xcd, 0x0a, 0x6f, 0x42, 0xe6, 0x98, 0x0e, + 0x0c, 0x2b, 0x78, 0xd7, 
0x14, 0x83, 0xf0, 0xad, 0x36, 0x1d, 0xbd, 0xd5, 0xee, 0x3f, 0x86, 0x0d, + 0xcd, 0x1e, 0xcd, 0xba, 0xbb, 0x8f, 0x66, 0xae, 0xf9, 0xde, 0x97, 0x89, 0xaf, 0x61, 0xd2, 0x62, + 0x7e, 0x9b, 0x4c, 0x1d, 0x74, 0xf6, 0xff, 0x98, 0xdc, 0x3a, 0x10, 0x7a, 0x9d, 0x70, 0x99, 0x0a, + 0xed, 0x9b, 0x54, 0x63, 0xae, 0xff, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x6b, 0x03, 0xf1, 0x99, 0x1b, + 0x1c, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go similarity index 82% rename from src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go rename to src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go index b08b81c1..be534f0f 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -1,12 +1,50 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ package descriptor import fmt "fmt" - import strings "strings" import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" import sort "sort" import strconv "strconv" import reflect "reflect" +import proto "github.com/gogo/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. 
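// ---- illustrative sketch added by the editor, not part of the vendored patch ----
// The renamed descriptor_gostring.gen.go below generates GoString methods, so
// fmt's %#v verb prints a descriptor message as Go source that reconstructs
// it, which is mainly useful when debugging code generators. A minimal,
// hypothetical example:
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
	descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor"
)

func main() {
	rng := &descriptor.DescriptorProto_ExtensionRange{
		Start: proto.Int32(1000),
		End:   proto.Int32(536870912),
	}
	// %#v invokes the generated GoString(), printing roughly:
	//   &descriptor.DescriptorProto_ExtensionRange{Start: func(v int32) *int32 { return &v }(1000), ...}
	fmt.Printf("%#v\n", rng)
}
// ---- end of sketch; the generated GoString helpers continue below ----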
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf func (this *FileDescriptorSet) GoString() string { if this == nil { @@ -117,7 +155,7 @@ func (this *DescriptorProto_ExtensionRange) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") if this.Start != nil { s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") @@ -125,6 +163,9 @@ func (this *DescriptorProto_ExtensionRange) GoString() string { if this.End != nil { s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -149,6 +190,22 @@ func (this *DescriptorProto_ReservedRange) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *FieldDescriptorProto) GoString() string { if this == nil { return "nil" @@ -162,10 +219,10 @@ func (this *FieldDescriptorProto) GoString() string { s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") } if this.Label != nil { - s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "descriptor.FieldDescriptorProto_Label")+",\n") + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") } if this.Type != nil { - s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "descriptor.FieldDescriptorProto_Type")+",\n") + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") } if this.TypeName != nil { s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") @@ -195,11 +252,14 @@ func (this *OneofDescriptorProto) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&descriptor.OneofDescriptorProto{") if this.Name != nil { s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -303,7 +363,7 @@ func (this *FileOptions) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 20) + s := make([]string, 0, 23) s = append(s, "&descriptor.FileOptions{") if this.JavaPackage != nil { s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") @@ -321,7 +381,7 @@ func (this *FileOptions) GoString() string { s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") } if this.OptimizeFor != nil { - s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, 
"descriptor.FileOptions_OptimizeMode")+",\n") + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "FileOptions_OptimizeMode")+",\n") } if this.GoPackage != nil { s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") @@ -335,6 +395,9 @@ func (this *FileOptions) GoString() string { if this.PyGenericServices != nil { s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } if this.Deprecated != nil { s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") } @@ -347,8 +410,14 @@ func (this *FileOptions) GoString() string { if this.CsharpNamespace != nil { s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") } - if this.JavananoUseDeprecatedPackage != nil { - s = append(s, "JavananoUseDeprecatedPackage: "+valueToGoStringDescriptor(this.JavananoUseDeprecatedPackage, "bool")+",\n") + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") @@ -395,13 +464,13 @@ func (this *FieldOptions) GoString() string { s := make([]string, 0, 11) s = append(s, "&descriptor.FieldOptions{") if this.Ctype != nil { - s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "descriptor.FieldOptions_CType")+",\n") + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") } if this.Packed != nil { s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") } if this.Jstype != nil { - s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "descriptor.FieldOptions_JSType")+",\n") + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") } if this.Lazy != nil { s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") @@ -422,6 +491,22 @@ func (this *FieldOptions) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *EnumOptions) GoString() string { if this == nil { return "nil" @@ -486,11 +571,14 @@ func (this *MethodOptions) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&descriptor.MethodOptions{") if this.Deprecated != nil { s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") } + if this.IdempotencyLevel != nil { + s = append(s, 
"IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } @@ -594,6 +682,45 @@ func (this *SourceCodeInfo_Location) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringDescriptor(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { diff --git a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go index 861f4d02..e0846a35 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ b/src/stackdriver-nozzle/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -99,6 +99,17 @@ func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { return x } +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + func (field *FieldDescriptorProto) GetKey() []byte { x := field.GetKeyUint64() i := 0 @@ -111,6 +122,18 @@ func (field *FieldDescriptorProto) GetKey() []byte { return keybuf } +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { msg := desc.GetMessage(packageName, messageName) if msg == nil { @@ -352,6 +375,16 @@ func (f *FieldDescriptorProto) IsPacked() bool { return f.Options != nil && f.GetOptions().GetPacked() } +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + func (m *DescriptorProto) 
HasExtension() bool { return len(m.ExtensionRange) > 0 } diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/decode.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/decode.go index 04dcb881..aa207298 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/decode.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/decode.go @@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 @@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) { return 0, 0 } -// DecodeVarint reads a varint-encoded integer from the Buffer. -// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { i := p.index l := len(p.buf) @@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { return } +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + // DecodeFixed64 reads a 64-bit integer from the Buffer. // This is the format for the // fixed64, sfixed64, and double protocol buffer types. @@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. 
if u, ok := pb.(Unmarshaler); ok { diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/encode.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/encode.go index 8c1b8fd1..8b84d1b2 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/encode.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) } func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) { } p := NewBuffer(nil) err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } if p.buf == nil && err == nil { // Return a non-nil slice on success. return []byte{}, nil @@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { data, err := m.Marshal() - if err != nil { - return err - } p.buf = append(p.buf, data...) - return nil + return err } t, base, err := getbase(pb) @@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error { } if collectStats { - stats.Encode++ + (stats).Encode++ // Parens are to work around a goimports bug. } if len(p.buf) > maxMarshalSize { @@ -309,7 +302,7 @@ func Size(pb Message) (n int) { } if collectStats { - stats.Size++ + (stats).Size++ // Parens are to work around a goimports bug. 
} return @@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) { if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() - n += len(p.tagcode) n += sizeRawBytes(data) continue } @@ -1083,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { return err } - v, _ := exts.extensionsRead() return o.enc_map_body(v) } diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/extensions.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/extensions.go index 6b9b3637..eaad2183 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/extensions.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/extensions.go @@ -154,6 +154,7 @@ type ExtensionDesc struct { Field int32 // field number Name string // fully-qualified name of extension, for text formatting Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined } func (ed *ExtensionDesc) repeated() bool { diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/lib.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/lib.go index ac4ddbc0..1c225504 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/lib.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/lib.go @@ -73,7 +73,6 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. 
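The encode.go hunks above simplify EncodeZigzag64/sizeZigzag64 by dropping a redundant uint64 conversion, and the decode.go hunks earlier add a fast path for varint decoding; zigzag exists precisely so that small-magnitude negative sint64 values become small unsigned values and therefore short varints. The following is a minimal standalone sketch of that mapping for reviewers; zigzag64 and unzigzag64 are hypothetical helper names used only for illustration and are not part of the proto package or this patch.

package main

import "fmt"

// zigzag64 mirrors the expression used by EncodeZigzag64 above:
// (x << 1) ^ uint64(int64(x) >> 63). The arithmetic right shift turns the
// sign bit into an all-ones or all-zeros mask, so 0→0, -1→1, 1→2, -2→3, ...
func zigzag64(v int64) uint64 {
	return (uint64(v) << 1) ^ uint64(v>>63)
}

// unzigzag64 inverts the mapping.
func unzigzag64(u uint64) int64 {
	return int64(u>>1) ^ -int64(u&1)
}

func main() {
	for _, v := range []int64{0, -1, 1, -2, 2, -64} {
		u := zigzag64(v)
		fmt.Printf("%4d -> %3d -> %4d\n", v, u, unzigzag64(u))
	}
}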
diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/text_parser.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/text_parser.go index 4fd05312..5e14513f 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/text_parser.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -592,7 +592,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { props = oop.Prop nv := reflect.New(oop.Type.Elem()) dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) } if !dst.IsValid() { return p.errorf("unknown field name %q in %v", name, st) @@ -861,7 +865,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) + fv.SetUint(x) return nil } case reflect.Uint64: diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile new file mode 100644 index 00000000..f706871a --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/Makefile @@ -0,0 +1,37 @@ +# Go support for Protocol Buffers - Google's data interchange format +# +# Copyright 2010 The Go Authors. All rights reserved. +# https://github.com/golang/protobuf +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Not stored here, but descriptor.proto is in https://github.com/google/protobuf/ +# at src/google/protobuf/descriptor.proto +regenerate: + @echo WARNING! THIS RULE IS PROBABLY NOT RIGHT FOR YOUR INSTALLATION + cp $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto . + protoc --go_out=../../../../.. 
-I$(HOME)/src/protobuf/include $(HOME)/src/protobuf/include/google/protobuf/descriptor.proto diff --git a/src/stackdriver-nozzle/vendor/google.golang.org/genproto/protobuf/descriptor.pb.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go similarity index 76% rename from src/stackdriver-nozzle/vendor/google.golang.org/genproto/protobuf/descriptor.pb.go rename to src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go index 03c04219..1d92cb27 100644 --- a/src/stackdriver-nozzle/vendor/google.golang.org/genproto/protobuf/descriptor.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.pb.go @@ -1,8 +1,35 @@ -// Code generated by protoc-gen-go. -// source: google.golang.org/genproto/protobuf/descriptor.proto -// DO NOT EDIT! - -package descriptor // import "google.golang.org/genproto/protobuf" +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. + +It is generated from these files: + google/protobuf/descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ +package descriptor import proto "github.com/golang/protobuf/proto" import fmt "fmt" @@ -13,6 +40,12 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + type FieldDescriptorProto_Type int32 const ( @@ -31,6 +64,10 @@ const ( FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // New in version 2. @@ -100,7 +137,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { *x = FieldDescriptorProto_Type(value) return nil } -func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3, 0} } +func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{3, 0} } type FieldDescriptorProto_Label int32 @@ -139,7 +176,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptor1, []int{3, 1} + return fileDescriptor0, []int{3, 1} } // Generated classes can be optimized for speed or code size. 
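The hunks in this regenerated descriptor.pb.go mostly renumber the descriptor reference (fileDescriptor1 → fileDescriptor0) after the package move; the generated enum helpers themselves (Enum, String, UnmarshalJSON, EnumDescriptor) are unchanged. As a quick illustration of what those helpers provide, the sketch below round-trips a FieldDescriptorProto_Type value through its JSON name. It assumes the canonical import path github.com/golang/protobuf/protoc-gen-go/descriptor and is illustration only, not part of the patch.

package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// String() resolves the value through the generated name map.
	t := descriptor.FieldDescriptorProto_TYPE_STRING
	fmt.Println(t.String()) // TYPE_STRING

	// UnmarshalJSON accepts the quoted enum name (or its number) via
	// proto.UnmarshalJSONEnum, as in the generated code above.
	var parsed descriptor.FieldDescriptorProto_Type
	if err := parsed.UnmarshalJSON([]byte(`"TYPE_STRING"`)); err != nil {
		panic(err)
	}
	fmt.Println(parsed == t) // true

	// Enum() returns a pointer, convenient for optional proto2 fields.
	field := &descriptor.FieldDescriptorProto{Type: t.Enum()}
	fmt.Println(field.GetType()) // TYPE_STRING
}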
@@ -179,7 +216,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { *x = FileOptions_OptimizeMode(value) return nil } -func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 0} } +func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } type FieldOptions_CType int32 @@ -217,7 +254,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { *x = FieldOptions_CType(value) return nil } -func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{11, 0} } +func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 0} } type FieldOptions_JSType int32 @@ -257,7 +294,49 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { *x = FieldOptions_JSType(value) return nil } -func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{11, 1} } +func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{11, 1} } + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{16, 0} +} // The protocol compiler can output a FileDescriptorSet containing the .proto // files it parses. 
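MethodOptions_IdempotencyLevel is the one genuinely new enum introduced by this regeneration; its default constant and GetIdempotencyLevel accessor appear further down in the file. A minimal sketch of how it is used through the generated accessors, again assuming the canonical golang/protobuf import path; this snippet is illustration only, not part of the patch.

package main

import (
	"fmt"

	"github.com/golang/protobuf/protoc-gen-go/descriptor"
)

func main() {
	// Enum() yields a *MethodOptions_IdempotencyLevel suitable for the
	// optional proto2 field on MethodOptions.
	opts := &descriptor.MethodOptions{
		IdempotencyLevel: descriptor.MethodOptions_NO_SIDE_EFFECTS.Enum(),
	}
	fmt.Println(opts.GetIdempotencyLevel()) // NO_SIDE_EFFECTS

	// When the field is unset, the generated getter falls back to the
	// declared default, IDEMPOTENCY_UNKNOWN.
	fmt.Println((&descriptor.MethodOptions{}).GetIdempotencyLevel()) // IDEMPOTENCY_UNKNOWN
}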
@@ -269,7 +348,7 @@ type FileDescriptorSet struct { func (m *FileDescriptorSet) Reset() { *m = FileDescriptorSet{} } func (m *FileDescriptorSet) String() string { return proto.CompactTextString(m) } func (*FileDescriptorSet) ProtoMessage() {} -func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } +func (*FileDescriptorSet) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (m *FileDescriptorSet) GetFile() []*FileDescriptorProto { if m != nil { @@ -309,7 +388,7 @@ type FileDescriptorProto struct { func (m *FileDescriptorProto) Reset() { *m = FileDescriptorProto{} } func (m *FileDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FileDescriptorProto) ProtoMessage() {} -func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } +func (*FileDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } func (m *FileDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -415,7 +494,7 @@ type DescriptorProto struct { func (m *DescriptorProto) Reset() { *m = DescriptorProto{} } func (m *DescriptorProto) String() string { return proto.CompactTextString(m) } func (*DescriptorProto) ProtoMessage() {} -func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } +func (*DescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } func (m *DescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -497,7 +576,7 @@ func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ func (m *DescriptorProto_ExtensionRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ExtensionRange) ProtoMessage() {} func (*DescriptorProto_ExtensionRange) Descriptor() ([]byte, []int) { - return fileDescriptor1, []int{2, 0} + return fileDescriptor0, []int{2, 0} } func (m *DescriptorProto_ExtensionRange) GetStart() int32 { @@ -527,7 +606,7 @@ func (m *DescriptorProto_ReservedRange) Reset() { *m = DescriptorProto_R func (m *DescriptorProto_ReservedRange) String() string { return proto.CompactTextString(m) } func (*DescriptorProto_ReservedRange) ProtoMessage() {} func (*DescriptorProto_ReservedRange) Descriptor() ([]byte, []int) { - return fileDescriptor1, []int{2, 1} + return fileDescriptor0, []int{2, 1} } func (m *DescriptorProto_ReservedRange) GetStart() int32 { @@ -582,7 +661,7 @@ type FieldDescriptorProto struct { func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -664,7 +743,7 @@ type OneofDescriptorProto struct { func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -691,7 +770,7 @@ type EnumDescriptorProto struct { func (m 
*EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -725,7 +804,7 @@ type EnumValueDescriptorProto struct { func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorProto{} } func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} -func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } +func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *EnumValueDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -759,7 +838,7 @@ type ServiceDescriptorProto struct { func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -800,7 +879,7 @@ type MethodDescriptorProto struct { func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -866,19 +945,8 @@ type FileOptions struct { // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // If set true, then the Java code generator will generate equals() and - // hashCode() methods for all messages defined in the .proto file. - // This increases generated code size, potentially substantially for large - // protos, which may harm a memory-constrained application. - // - In the full runtime this is a speed optimization, as the - // AbstractMessage base class includes reflection-based implementations of - // these methods. - // - In the lite runtime, setting this option changes the semantics of - // equals() and hashCode() to more closely match those of the full runtime; - // the generated methods compute their results based on field values rather - // than object identity. (Implementations should not assume that hashcodes - // will be consistent across runtimes or versions of the protocol compiler.) - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` + // This option does nothing. 
+ JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. @@ -906,6 +974,7 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,19,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -919,6 +988,18 @@ type FileOptions struct { ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` // Namespace for generated classes; defaults to the package. CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -928,7 +1009,7 @@ type FileOptions struct { func (m *FileOptions) Reset() { *m = FileOptions{} } func (m *FileOptions) String() string { return proto.CompactTextString(m) } func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } var extRange_FileOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -939,12 +1020,12 @@ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaGenerateEqualsAndHash bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED const Default_FileOptions_CcGenericServices bool = false const Default_FileOptions_JavaGenericServices bool = false const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false const Default_FileOptions_Deprecated bool = false const Default_FileOptions_CcEnableArenas bool = false @@ -973,7 +1054,7 @@ func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash } - return Default_FileOptions_JavaGenerateEqualsAndHash + return false } func (m *FileOptions) GetJavaStringCheckUtf8() bool { @@ -1018,6 +1099,13 @@ func (m *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + func (m *FileOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { return *m.Deprecated @@ -1046,6 +1134,27 @@ func (m *FileOptions) GetCsharpNamespace() string { return "" } +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix + } + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" +} + func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1113,7 +1222,7 @@ type MessageOptions struct { func (m *MessageOptions) Reset() { *m = MessageOptions{} } func (m *MessageOptions) String() string { return proto.CompactTextString(m) } func (*MessageOptions) ProtoMessage() {} -func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } var extRange_MessageOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1203,7 +1312,7 @@ type FieldOptions struct { // // // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outher message + // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. 
// This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy @@ -1229,7 +1338,7 @@ type FieldOptions struct { func (m *FieldOptions) Reset() { *m = FieldOptions{} } func (m *FieldOptions) String() string { return proto.CompactTextString(m) } func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } var extRange_FieldOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1304,7 +1413,7 @@ type OneofOptions struct { func (m *OneofOptions) Reset() { *m = OneofOptions{} } func (m *OneofOptions) String() string { return proto.CompactTextString(m) } func (*OneofOptions) ProtoMessage() {} -func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } var extRange_OneofOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1339,7 +1448,7 @@ type EnumOptions struct { func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } var extRange_EnumOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1387,7 +1496,7 @@ type EnumValueOptions struct { func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } var extRange_EnumValueOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1428,7 +1537,7 @@ type ServiceOptions struct { func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } var extRange_ServiceOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1459,7 +1568,8 @@ type MethodOptions struct { // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -1469,7 +1579,7 @@ type MethodOptions struct { func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } var extRange_MethodOptions = []proto.ExtensionRange{ {1000, 536870911}, @@ -1480,6 +1590,7 @@ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN func (m *MethodOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { @@ -1488,6 +1599,13 @@ func (m *MethodOptions) GetDeprecated() bool { return Default_MethodOptions_Deprecated } +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1517,7 +1635,7 @@ type UninterpretedOption struct { func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } +func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1583,7 +1701,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptor1, []int{17, 0} + return fileDescriptor0, []int{17, 0} } func (m *UninterpretedOption_NamePart) GetNamePart() string { @@ -1653,7 +1771,7 @@ type SourceCodeInfo struct { func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} } +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1749,7 +1867,7 @@ type SourceCodeInfo_Location struct { func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location{} } func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} -func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18, 0} } +func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18, 0} } func (m *SourceCodeInfo_Location) GetPath() []int32 { if m != nil { @@ -1799,7 +1917,7 @@ type GeneratedCodeInfo struct { func (m 
*GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo) ProtoMessage() {} -func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} } +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { if m != nil { @@ -1828,7 +1946,7 @@ func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_ func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { - return fileDescriptor1, []int{19, 0} + return fileDescriptor0, []int{19, 0} } func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { @@ -1890,156 +2008,167 @@ func init() { proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) -} - -func init() { - proto.RegisterFile("google.golang.org/genproto/protobuf/descriptor.proto", fileDescriptor1) -} - -var fileDescriptor1 = []byte{ - // 2301 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x59, 0xcd, 0x73, 0xdb, 0xc6, - 0x15, 0x0f, 0xf8, 0x25, 0xf2, 0x91, 0xa2, 0x56, 0x2b, 0xc5, 0x81, 0x95, 0x38, 0x96, 0x19, 0x3b, - 0x96, 0xed, 0x96, 0xca, 0xc8, 0x1f, 0x71, 0x94, 0x4e, 0x3a, 0x94, 0x08, 0x2b, 0xf4, 0x50, 0x22, - 0x0b, 0x4a, 0xad, 0x93, 0x1e, 0x30, 0x2b, 0x60, 0x49, 0xc1, 0x06, 0x17, 0x28, 0x00, 0xda, 0x56, - 0x4e, 0x9e, 0xe9, 0xa9, 0xc7, 0xde, 0x3a, 0x6d, 0xa7, 0xd3, 0xc9, 0x25, 0x33, 0xfd, 0x03, 0x7a, - 0xe8, 0xbd, 0xd7, 0xce, 0xf4, 0xde, 0x63, 0x67, 0xda, 0xff, 0xa0, 0xd7, 0xce, 0xee, 0x02, 0x20, - 0xf8, 0x15, 0xab, 0x99, 0x49, 0xd2, 0x8b, 0xcd, 0xfd, 0xbd, 0xdf, 0x7b, 0x78, 0xfb, 0xf6, 0xe1, - 0xbd, 0x87, 0x15, 0xdc, 0x1b, 0xb8, 0xee, 0xc0, 0xa1, 0xf5, 0x81, 0xeb, 0x10, 0x36, 0xa8, 0xbb, - 0xfe, 0x60, 0x7b, 0x40, 0x99, 0xe7, 0xbb, 0xa1, 0xbb, 0x2d, 0xfe, 0x3d, 0x1d, 0xf5, 0xb7, 0x2d, - 0x1a, 0x98, 0xbe, 0xed, 0x85, 0xae, 0x5f, 0x17, 0x18, 0x5e, 0x89, 0xb4, 0x62, 0x46, 0xed, 0x10, - 0x56, 0x1f, 0xd9, 0x0e, 0x6d, 0x26, 0xc4, 0x1e, 0x0d, 0xf1, 0x43, 0xc8, 0xf5, 0x6d, 0x87, 0xaa, - 0xca, 0x66, 0x76, 0xab, 0xbc, 0x73, 0xbd, 0x3e, 0xa5, 0x54, 0x9f, 0xd4, 0xe8, 0x72, 0x58, 0x17, - 0x1a, 0xb5, 0x7f, 0xe6, 0x60, 0x6d, 0x8e, 0x14, 0x63, 0xc8, 0x31, 0x32, 0xe4, 0x16, 0x95, 0xad, - 0x92, 0x2e, 0x7e, 0x63, 0x15, 0x96, 0x3c, 0x62, 0x3e, 0x23, 0x03, 0xaa, 0x66, 0x04, 0x1c, 0x2f, - 0xf1, 0xbb, 0x00, 0x16, 0xf5, 0x28, 0xb3, 0x28, 0x33, 0xcf, 0xd5, 0xec, 0x66, 0x76, 0xab, 0xa4, - 0xa7, 0x10, 0x7c, 0x07, 0x56, 0xbd, 0xd1, 0xa9, 0x63, 0x9b, 0x46, 0x8a, 0x06, 0x9b, 0xd9, 0xad, - 0xbc, 0x8e, 0xa4, 0xa0, 0x39, 0x26, 0xdf, 0x84, 0x95, 0x17, 0x94, 0x3c, 0x4b, 0x53, 0xcb, 0x82, - 0x5a, 0xe5, 0x70, 0x8a, 0xb8, 0x0f, 0x95, 0x21, 0x0d, 0x02, 0x32, 0xa0, 0x46, 0x78, 0xee, 0x51, - 0x35, 0x27, 0x76, 0xbf, 0x39, 0xb3, 0xfb, 0xe9, 0x9d, 0x97, 0x23, 0xad, 0xe3, 0x73, 0x8f, 0xe2, - 0x06, 0x94, 0x28, 0x1b, 0x0d, 0xa5, 0x85, 0xfc, 0x82, 0xf8, 0x69, 0x6c, 0x34, 0x9c, 0xb6, 0x52, - 0xe4, 0x6a, 0x91, 0x89, 0xa5, 0x80, 0xfa, 0xcf, 0x6d, 0x93, 0xaa, 0x05, 0x61, 0xe0, 0xe6, 0x8c, 
- 0x81, 0x9e, 0x94, 0x4f, 0xdb, 0x88, 0xf5, 0xf0, 0x3e, 0x94, 0xe8, 0xcb, 0x90, 0xb2, 0xc0, 0x76, - 0x99, 0xba, 0x24, 0x8c, 0xdc, 0x98, 0x73, 0x8a, 0xd4, 0xb1, 0xa6, 0x4d, 0x8c, 0xf5, 0xf0, 0x03, - 0x58, 0x72, 0xbd, 0xd0, 0x76, 0x59, 0xa0, 0x16, 0x37, 0x95, 0xad, 0xf2, 0xce, 0x3b, 0x73, 0x13, - 0xa1, 0x23, 0x39, 0x7a, 0x4c, 0xc6, 0x2d, 0x40, 0x81, 0x3b, 0xf2, 0x4d, 0x6a, 0x98, 0xae, 0x45, - 0x0d, 0x9b, 0xf5, 0x5d, 0xb5, 0x24, 0x0c, 0x5c, 0x9d, 0xdd, 0x88, 0x20, 0xee, 0xbb, 0x16, 0x6d, - 0xb1, 0xbe, 0xab, 0x57, 0x83, 0x89, 0x35, 0xbe, 0x04, 0x85, 0xe0, 0x9c, 0x85, 0xe4, 0xa5, 0x5a, - 0x11, 0x19, 0x12, 0xad, 0x6a, 0xff, 0xc9, 0xc3, 0xca, 0x45, 0x52, 0xec, 0x63, 0xc8, 0xf7, 0xf9, - 0x2e, 0xd5, 0xcc, 0xff, 0x12, 0x03, 0xa9, 0x33, 0x19, 0xc4, 0xc2, 0x37, 0x0c, 0x62, 0x03, 0xca, - 0x8c, 0x06, 0x21, 0xb5, 0x64, 0x46, 0x64, 0x2f, 0x98, 0x53, 0x20, 0x95, 0x66, 0x53, 0x2a, 0xf7, - 0x8d, 0x52, 0xea, 0x09, 0xac, 0x24, 0x2e, 0x19, 0x3e, 0x61, 0x83, 0x38, 0x37, 0xb7, 0x5f, 0xe7, - 0x49, 0x5d, 0x8b, 0xf5, 0x74, 0xae, 0xa6, 0x57, 0xe9, 0xc4, 0x1a, 0x37, 0x01, 0x5c, 0x46, 0xdd, - 0xbe, 0x61, 0x51, 0xd3, 0x51, 0x8b, 0x0b, 0xa2, 0xd4, 0xe1, 0x94, 0x99, 0x28, 0xb9, 0x12, 0x35, - 0x1d, 0xfc, 0xd1, 0x38, 0xd5, 0x96, 0x16, 0x64, 0xca, 0xa1, 0x7c, 0xc9, 0x66, 0xb2, 0xed, 0x04, - 0xaa, 0x3e, 0xe5, 0x79, 0x4f, 0xad, 0x68, 0x67, 0x25, 0xe1, 0x44, 0xfd, 0xb5, 0x3b, 0xd3, 0x23, - 0x35, 0xb9, 0xb1, 0x65, 0x3f, 0xbd, 0xc4, 0xef, 0x41, 0x02, 0x18, 0x22, 0xad, 0x40, 0x54, 0xa1, - 0x4a, 0x0c, 0x1e, 0x91, 0x21, 0xdd, 0x78, 0x08, 0xd5, 0xc9, 0xf0, 0xe0, 0x75, 0xc8, 0x07, 0x21, - 0xf1, 0x43, 0x91, 0x85, 0x79, 0x5d, 0x2e, 0x30, 0x82, 0x2c, 0x65, 0x96, 0xa8, 0x72, 0x79, 0x9d, - 0xff, 0xdc, 0xf8, 0x10, 0x96, 0x27, 0x1e, 0x7f, 0x51, 0xc5, 0xda, 0x6f, 0x0a, 0xb0, 0x3e, 0x2f, - 0xe7, 0xe6, 0xa6, 0xff, 0x25, 0x28, 0xb0, 0xd1, 0xf0, 0x94, 0xfa, 0x6a, 0x56, 0x58, 0x88, 0x56, - 0xb8, 0x01, 0x79, 0x87, 0x9c, 0x52, 0x47, 0xcd, 0x6d, 0x2a, 0x5b, 0xd5, 0x9d, 0x3b, 0x17, 0xca, - 0xea, 0x7a, 0x9b, 0xab, 0xe8, 0x52, 0x13, 0x7f, 0x02, 0xb9, 0xa8, 0xc4, 0x71, 0x0b, 0xb7, 0x2f, - 0x66, 0x81, 0xe7, 0xa2, 0x2e, 0xf4, 0xf0, 0xdb, 0x50, 0xe2, 0xff, 0xcb, 0xd8, 0x16, 0x84, 0xcf, - 0x45, 0x0e, 0xf0, 0xb8, 0xe2, 0x0d, 0x28, 0x8a, 0x34, 0xb3, 0x68, 0xdc, 0x1a, 0x92, 0x35, 0x3f, - 0x18, 0x8b, 0xf6, 0xc9, 0xc8, 0x09, 0x8d, 0xe7, 0xc4, 0x19, 0x51, 0x91, 0x30, 0x25, 0xbd, 0x12, - 0x81, 0x3f, 0xe5, 0x18, 0xbe, 0x0a, 0x65, 0x99, 0x95, 0x36, 0xb3, 0xe8, 0x4b, 0x51, 0x7d, 0xf2, - 0xba, 0x4c, 0xd4, 0x16, 0x47, 0xf8, 0xe3, 0x9f, 0x06, 0x2e, 0x8b, 0x8f, 0x56, 0x3c, 0x82, 0x03, - 0xe2, 0xf1, 0x1f, 0x4e, 0x17, 0xbe, 0x2b, 0xf3, 0xb7, 0x37, 0x9d, 0x8b, 0xb5, 0x3f, 0x67, 0x20, - 0x27, 0xde, 0xb7, 0x15, 0x28, 0x1f, 0x7f, 0xd6, 0xd5, 0x8c, 0x66, 0xe7, 0x64, 0xaf, 0xad, 0x21, - 0x05, 0x57, 0x01, 0x04, 0xf0, 0xa8, 0xdd, 0x69, 0x1c, 0xa3, 0x4c, 0xb2, 0x6e, 0x1d, 0x1d, 0x3f, - 0xb8, 0x87, 0xb2, 0x89, 0xc2, 0x89, 0x04, 0x72, 0x69, 0xc2, 0xdd, 0x1d, 0x94, 0xc7, 0x08, 0x2a, - 0xd2, 0x40, 0xeb, 0x89, 0xd6, 0x7c, 0x70, 0x0f, 0x15, 0x26, 0x91, 0xbb, 0x3b, 0x68, 0x09, 0x2f, - 0x43, 0x49, 0x20, 0x7b, 0x9d, 0x4e, 0x1b, 0x15, 0x13, 0x9b, 0xbd, 0x63, 0xbd, 0x75, 0x74, 0x80, - 0x4a, 0x89, 0xcd, 0x03, 0xbd, 0x73, 0xd2, 0x45, 0x90, 0x58, 0x38, 0xd4, 0x7a, 0xbd, 0xc6, 0x81, - 0x86, 0xca, 0x09, 0x63, 0xef, 0xb3, 0x63, 0xad, 0x87, 0x2a, 0x13, 0x6e, 0xdd, 0xdd, 0x41, 0xcb, - 0xc9, 0x23, 0xb4, 0xa3, 0x93, 0x43, 0x54, 0xc5, 0xab, 0xb0, 0x2c, 0x1f, 0x11, 0x3b, 0xb1, 0x32, - 0x05, 0x3d, 0xb8, 0x87, 0xd0, 0xd8, 0x11, 0x69, 0x65, 0x75, 0x02, 0x78, 0x70, 0x0f, 0xe1, 0xda, - 0x3e, 0xe4, 0x45, 0x76, 
0x61, 0x0c, 0xd5, 0x76, 0x63, 0x4f, 0x6b, 0x1b, 0x9d, 0xee, 0x71, 0xab, - 0x73, 0xd4, 0x68, 0x23, 0x65, 0x8c, 0xe9, 0xda, 0x4f, 0x4e, 0x5a, 0xba, 0xd6, 0x44, 0x99, 0x34, - 0xd6, 0xd5, 0x1a, 0xc7, 0x5a, 0x13, 0x65, 0x6b, 0x26, 0xac, 0xcf, 0xab, 0x33, 0x73, 0xdf, 0x8c, - 0xd4, 0x11, 0x67, 0x16, 0x1c, 0xb1, 0xb0, 0x35, 0x73, 0xc4, 0x5f, 0x2a, 0xb0, 0x36, 0xa7, 0xd6, - 0xce, 0x7d, 0xc8, 0x8f, 0x21, 0x2f, 0x53, 0x54, 0x76, 0x9f, 0x5b, 0x73, 0x8b, 0xb6, 0x48, 0xd8, - 0x99, 0x0e, 0x24, 0xf4, 0xd2, 0x1d, 0x38, 0xbb, 0xa0, 0x03, 0x73, 0x13, 0x33, 0x4e, 0xfe, 0x52, - 0x01, 0x75, 0x91, 0xed, 0xd7, 0x14, 0x8a, 0xcc, 0x44, 0xa1, 0xf8, 0x78, 0xda, 0x81, 0x6b, 0x8b, - 0xf7, 0x30, 0xe3, 0xc5, 0x57, 0x0a, 0x5c, 0x9a, 0x3f, 0xa8, 0xcc, 0xf5, 0xe1, 0x13, 0x28, 0x0c, - 0x69, 0x78, 0xe6, 0xc6, 0xcd, 0xfa, 0xfd, 0x39, 0x2d, 0x80, 0x8b, 0xa7, 0x63, 0x15, 0x69, 0xa5, - 0x7b, 0x48, 0x76, 0xd1, 0xb4, 0x21, 0xbd, 0x99, 0xf1, 0xf4, 0x57, 0x19, 0x78, 0x73, 0xae, 0xf1, - 0xb9, 0x8e, 0x5e, 0x01, 0xb0, 0x99, 0x37, 0x0a, 0x65, 0x43, 0x96, 0xf5, 0xa9, 0x24, 0x10, 0xf1, - 0xee, 0xf3, 0xda, 0x33, 0x0a, 0x13, 0x79, 0x56, 0xc8, 0x41, 0x42, 0x82, 0xf0, 0x70, 0xec, 0x68, - 0x4e, 0x38, 0xfa, 0xee, 0x82, 0x9d, 0xce, 0xf4, 0xba, 0x0f, 0x00, 0x99, 0x8e, 0x4d, 0x59, 0x68, - 0x04, 0xa1, 0x4f, 0xc9, 0xd0, 0x66, 0x03, 0x51, 0x80, 0x8b, 0xbb, 0xf9, 0x3e, 0x71, 0x02, 0xaa, - 0xaf, 0x48, 0x71, 0x2f, 0x96, 0x72, 0x0d, 0xd1, 0x65, 0xfc, 0x94, 0x46, 0x61, 0x42, 0x43, 0x8a, - 0x13, 0x8d, 0xda, 0xaf, 0x97, 0xa0, 0x9c, 0x1a, 0xeb, 0xf0, 0x35, 0xa8, 0x3c, 0x25, 0xcf, 0x89, - 0x11, 0x8f, 0xea, 0x32, 0x12, 0x65, 0x8e, 0x75, 0xa3, 0x71, 0xfd, 0x03, 0x58, 0x17, 0x14, 0x77, - 0x14, 0x52, 0xdf, 0x30, 0x1d, 0x12, 0x04, 0x22, 0x68, 0x45, 0x41, 0xc5, 0x5c, 0xd6, 0xe1, 0xa2, - 0xfd, 0x58, 0x82, 0xef, 0xc3, 0x9a, 0xd0, 0x18, 0x8e, 0x9c, 0xd0, 0xf6, 0x1c, 0x6a, 0xf0, 0x8f, - 0x87, 0x40, 0x14, 0xe2, 0xc4, 0xb3, 0x55, 0xce, 0x38, 0x8c, 0x08, 0xdc, 0xa3, 0x00, 0x1f, 0xc0, - 0x15, 0xa1, 0x36, 0xa0, 0x8c, 0xfa, 0x24, 0xa4, 0x06, 0xfd, 0xc5, 0x88, 0x38, 0x81, 0x41, 0x98, - 0x65, 0x9c, 0x91, 0xe0, 0x4c, 0x5d, 0x4f, 0x1b, 0xb8, 0xcc, 0xb9, 0x07, 0x11, 0x55, 0x13, 0xcc, - 0x06, 0xb3, 0x3e, 0x25, 0xc1, 0x19, 0xde, 0x85, 0x4b, 0xc2, 0x50, 0x10, 0xfa, 0x36, 0x1b, 0x18, - 0xe6, 0x19, 0x35, 0x9f, 0x19, 0xa3, 0xb0, 0xff, 0x50, 0x7d, 0x3b, 0x6d, 0x41, 0x38, 0xd9, 0x13, - 0x9c, 0x7d, 0x4e, 0x39, 0x09, 0xfb, 0x0f, 0x71, 0x0f, 0x2a, 0xfc, 0x3c, 0x86, 0xf6, 0x17, 0xd4, - 0xe8, 0xbb, 0xbe, 0x68, 0x2e, 0xd5, 0x39, 0x2f, 0x77, 0x2a, 0x88, 0xf5, 0x4e, 0xa4, 0x70, 0xe8, - 0x5a, 0x74, 0x37, 0xdf, 0xeb, 0x6a, 0x5a, 0x53, 0x2f, 0xc7, 0x56, 0x1e, 0xb9, 0x3e, 0xcf, 0xa9, - 0x81, 0x9b, 0xc4, 0xb8, 0x2c, 0x73, 0x6a, 0xe0, 0xc6, 0x11, 0xbe, 0x0f, 0x6b, 0xa6, 0x29, 0xb7, - 0x6d, 0x9b, 0x46, 0x34, 0xe5, 0x07, 0x2a, 0x9a, 0x88, 0x97, 0x69, 0x1e, 0x48, 0x42, 0x94, 0xe6, - 0x01, 0xfe, 0x08, 0xde, 0x1c, 0xc7, 0x2b, 0xad, 0xb8, 0x3a, 0xb3, 0xcb, 0x69, 0xd5, 0xfb, 0xb0, - 0xe6, 0x9d, 0xcf, 0x2a, 0xe2, 0x89, 0x27, 0x7a, 0xe7, 0xd3, 0x6a, 0x37, 0xc4, 0x97, 0x9b, 0x4f, - 0x4d, 0x12, 0x52, 0x4b, 0x7d, 0x2b, 0xcd, 0x4e, 0x09, 0xf0, 0x36, 0x20, 0xd3, 0x34, 0x28, 0x23, - 0xa7, 0x0e, 0x35, 0x88, 0x4f, 0x19, 0x09, 0xd4, 0xab, 0x69, 0x72, 0xd5, 0x34, 0x35, 0x21, 0x6d, - 0x08, 0x21, 0xbe, 0x0d, 0xab, 0xee, 0xe9, 0x53, 0x53, 0x26, 0x97, 0xe1, 0xf9, 0xb4, 0x6f, 0xbf, - 0x54, 0xaf, 0x8b, 0x30, 0xad, 0x70, 0x81, 0x48, 0xad, 0xae, 0x80, 0xf1, 0x2d, 0x40, 0x66, 0x70, - 0x46, 0x7c, 0x4f, 0x74, 0xf7, 0xc0, 0x23, 0x26, 0x55, 0x6f, 0x48, 0xaa, 0xc4, 0x8f, 0x62, 0x18, - 0x3f, 0x81, 0xf5, 0x11, 0xb3, 0x59, 0x48, 0x7d, 
0xcf, 0xa7, 0x7c, 0x48, 0x97, 0x6f, 0x9a, 0xfa, - 0xaf, 0xa5, 0x05, 0x63, 0xf6, 0x49, 0x9a, 0x2d, 0x4f, 0x57, 0x5f, 0x1b, 0xcd, 0x82, 0xb5, 0x5d, - 0xa8, 0xa4, 0x0f, 0x1d, 0x97, 0x40, 0x1e, 0x3b, 0x52, 0x78, 0x0f, 0xdd, 0xef, 0x34, 0x79, 0xf7, - 0xfb, 0x5c, 0x43, 0x19, 0xde, 0x85, 0xdb, 0xad, 0x63, 0xcd, 0xd0, 0x4f, 0x8e, 0x8e, 0x5b, 0x87, - 0x1a, 0xca, 0xde, 0x2e, 0x15, 0xff, 0xbd, 0x84, 0x5e, 0xbd, 0x7a, 0xf5, 0x2a, 0xf3, 0x38, 0x57, - 0x7c, 0x1f, 0xdd, 0xac, 0xfd, 0x35, 0x03, 0xd5, 0xc9, 0xf9, 0x17, 0xff, 0x08, 0xde, 0x8a, 0x3f, - 0x56, 0x03, 0x1a, 0x1a, 0x2f, 0x6c, 0x5f, 0x64, 0xe3, 0x90, 0xc8, 0x09, 0x32, 0x09, 0xe4, 0x7a, - 0xc4, 0xea, 0xd1, 0xf0, 0x67, 0xb6, 0xcf, 0x73, 0x6d, 0x48, 0x42, 0xdc, 0x86, 0xab, 0xcc, 0x35, - 0x82, 0x90, 0x30, 0x8b, 0xf8, 0x96, 0x31, 0xbe, 0x26, 0x30, 0x88, 0x69, 0xd2, 0x20, 0x70, 0x65, - 0x23, 0x48, 0xac, 0xbc, 0xc3, 0xdc, 0x5e, 0x44, 0x1e, 0x57, 0xc8, 0x46, 0x44, 0x9d, 0x3a, 0xf4, - 0xec, 0xa2, 0x43, 0x7f, 0x1b, 0x4a, 0x43, 0xe2, 0x19, 0x94, 0x85, 0xfe, 0xb9, 0x98, 0xda, 0x8a, - 0x7a, 0x71, 0x48, 0x3c, 0x8d, 0xaf, 0xbf, 0xbd, 0x93, 0x48, 0x45, 0xb3, 0xf6, 0x8f, 0x2c, 0x54, - 0xd2, 0x93, 0x1b, 0x1f, 0x84, 0x4d, 0x51, 0xa5, 0x15, 0xf1, 0x12, 0xbf, 0xf7, 0xb5, 0x73, 0x5e, - 0x7d, 0x9f, 0x97, 0xef, 0xdd, 0x82, 0x9c, 0xa7, 0x74, 0xa9, 0xc9, 0x5b, 0x27, 0x7f, 0x6d, 0xa9, - 0x9c, 0xd2, 0x8b, 0x7a, 0xb4, 0xc2, 0x07, 0x50, 0x78, 0x1a, 0x08, 0xdb, 0x05, 0x61, 0xfb, 0xfa, - 0xd7, 0xdb, 0x7e, 0xdc, 0x13, 0xc6, 0x4b, 0x8f, 0x7b, 0xc6, 0x51, 0x47, 0x3f, 0x6c, 0xb4, 0xf5, - 0x48, 0x1d, 0x5f, 0x86, 0x9c, 0x43, 0xbe, 0x38, 0x9f, 0x2c, 0xf4, 0x02, 0xba, 0x68, 0xe0, 0x2f, - 0x43, 0xee, 0x05, 0x25, 0xcf, 0x26, 0xcb, 0xab, 0x80, 0xbe, 0xc5, 0x17, 0x60, 0x1b, 0xf2, 0x22, - 0x5e, 0x18, 0x20, 0x8a, 0x18, 0x7a, 0x03, 0x17, 0x21, 0xb7, 0xdf, 0xd1, 0xf9, 0x4b, 0x80, 0xa0, - 0x22, 0x51, 0xa3, 0xdb, 0xd2, 0xf6, 0x35, 0x94, 0xa9, 0xdd, 0x87, 0x82, 0x0c, 0x02, 0x7f, 0x41, - 0x92, 0x30, 0xa0, 0x37, 0xa2, 0x65, 0x64, 0x43, 0x89, 0xa5, 0x27, 0x87, 0x7b, 0x9a, 0x8e, 0x32, - 0xe9, 0xe3, 0x0d, 0xa0, 0x92, 0x1e, 0xda, 0xbe, 0x9b, 0x9c, 0xfa, 0x8b, 0x02, 0xe5, 0xd4, 0x10, - 0xc6, 0xdb, 0x3f, 0x71, 0x1c, 0xf7, 0x85, 0x41, 0x1c, 0x9b, 0x04, 0x51, 0x52, 0x80, 0x80, 0x1a, - 0x1c, 0xb9, 0xe8, 0xa1, 0x7d, 0x27, 0xce, 0xff, 0x41, 0x01, 0x34, 0x3d, 0xc0, 0x4d, 0x39, 0xa8, - 0x7c, 0xaf, 0x0e, 0xfe, 0x5e, 0x81, 0xea, 0xe4, 0xd4, 0x36, 0xe5, 0xde, 0xb5, 0xef, 0xd5, 0xbd, - 0xdf, 0x29, 0xb0, 0x3c, 0x31, 0xab, 0xfd, 0x5f, 0x79, 0xf7, 0xdb, 0x2c, 0xac, 0xcd, 0xd1, 0xc3, - 0x8d, 0x68, 0xa8, 0x95, 0x73, 0xf6, 0x0f, 0x2f, 0xf2, 0xac, 0x3a, 0xef, 0x99, 0x5d, 0xe2, 0x87, - 0xd1, 0x0c, 0x7c, 0x0b, 0x90, 0x6d, 0x51, 0x16, 0xda, 0x7d, 0x9b, 0xfa, 0xd1, 0x87, 0xb8, 0x9c, - 0x74, 0x57, 0xc6, 0xb8, 0xfc, 0x16, 0xff, 0x01, 0x60, 0xcf, 0x0d, 0xec, 0xd0, 0x7e, 0x4e, 0x0d, - 0x9b, 0xc5, 0x5f, 0xed, 0x7c, 0xf2, 0xcd, 0xe9, 0x28, 0x96, 0xb4, 0x58, 0x98, 0xb0, 0x19, 0x1d, - 0x90, 0x29, 0x36, 0xaf, 0x7d, 0x59, 0x1d, 0xc5, 0x92, 0x84, 0x7d, 0x0d, 0x2a, 0x96, 0x3b, 0xe2, - 0x43, 0x84, 0xe4, 0xf1, 0x52, 0xab, 0xe8, 0x65, 0x89, 0x25, 0x94, 0x68, 0xca, 0x1b, 0x5f, 0x17, - 0x54, 0xf4, 0xb2, 0xc4, 0x24, 0xe5, 0x26, 0xac, 0x90, 0xc1, 0xc0, 0xe7, 0xc6, 0x63, 0x43, 0x72, - 0x74, 0xad, 0x26, 0xb0, 0x20, 0x6e, 0x3c, 0x86, 0x62, 0x1c, 0x07, 0xde, 0xcd, 0x78, 0x24, 0x0c, - 0x4f, 0x5e, 0xda, 0x64, 0xb6, 0x4a, 0x7a, 0x91, 0xc5, 0xc2, 0x6b, 0x50, 0xb1, 0x03, 0x63, 0x7c, - 0x7b, 0x98, 0xd9, 0xcc, 0x6c, 0x15, 0xf5, 0xb2, 0x1d, 0x24, 0xd7, 0x45, 0xb5, 0xaf, 0x32, 0x50, - 0x9d, 0xbc, 0xfd, 0xc4, 0x4d, 0x28, 0x3a, 0xae, 0x49, 0x44, 0x22, 0xc8, 
0xab, 0xf7, 0xad, 0xd7, - 0x5c, 0x98, 0xd6, 0xdb, 0x11, 0x5f, 0x4f, 0x34, 0x37, 0xfe, 0xa6, 0x40, 0x31, 0x86, 0xf1, 0x25, - 0xc8, 0x79, 0x24, 0x3c, 0x13, 0xe6, 0xf2, 0x7b, 0x19, 0xa4, 0xe8, 0x62, 0xcd, 0xf1, 0xc0, 0x23, - 0x4c, 0xa4, 0x40, 0x84, 0xf3, 0x35, 0x3f, 0x57, 0x87, 0x12, 0x4b, 0x0c, 0xc5, 0xee, 0x70, 0x48, - 0x59, 0x18, 0xc4, 0xe7, 0x1a, 0xe1, 0xfb, 0x11, 0x8c, 0xef, 0xc0, 0x6a, 0xe8, 0x13, 0xdb, 0x99, - 0xe0, 0xe6, 0x04, 0x17, 0xc5, 0x82, 0x84, 0xbc, 0x0b, 0x97, 0x63, 0xbb, 0x16, 0x0d, 0x89, 0x79, - 0x46, 0xad, 0xb1, 0x52, 0x41, 0x5c, 0xad, 0xbd, 0x15, 0x11, 0x9a, 0x91, 0x3c, 0xd6, 0xad, 0xfd, - 0x5d, 0x81, 0xd5, 0x78, 0x8c, 0xb7, 0x92, 0x60, 0x1d, 0x02, 0x10, 0xc6, 0xdc, 0x30, 0x1d, 0xae, - 0xd9, 0x54, 0x9e, 0xd1, 0xab, 0x37, 0x12, 0x25, 0x3d, 0x65, 0x60, 0x63, 0x08, 0x30, 0x96, 0x2c, - 0x0c, 0xdb, 0x55, 0x28, 0x47, 0x57, 0xdb, 0xe2, 0xef, 0x23, 0xf2, 0xdb, 0x0f, 0x24, 0xc4, 0xe7, - 0x7d, 0xbc, 0x0e, 0xf9, 0x53, 0x3a, 0xb0, 0x59, 0x74, 0xe1, 0x26, 0x17, 0xf1, 0x35, 0x5e, 0x2e, - 0xb9, 0xc6, 0xdb, 0xfb, 0x39, 0xac, 0x99, 0xee, 0x70, 0xda, 0xdd, 0x3d, 0x34, 0xf5, 0xfd, 0x19, - 0x7c, 0xaa, 0x7c, 0x0e, 0xe3, 0xe9, 0xec, 0x8f, 0x8a, 0xf2, 0x65, 0x26, 0x7b, 0xd0, 0xdd, 0xfb, - 0x53, 0x66, 0xe3, 0x40, 0xaa, 0x76, 0xe3, 0x9d, 0xea, 0xb4, 0xef, 0x50, 0x93, 0x7b, 0xff, 0xdf, - 0x00, 0x00, 0x00, 0xff, 0xff, 0x4c, 0x8f, 0xed, 0xda, 0x1b, 0x1a, 0x00, 0x00, + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) +} + +func init() { proto.RegisterFile("google/protobuf/descriptor.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2490 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xdd, 0x8e, 0xdb, 0xc6, + 0x15, 0x8e, 0x7e, 0x57, 0x3a, 0xd2, 0x6a, 0x67, 0x67, 0x37, 0x36, 0xbd, 0xf9, 0xf1, 0x5a, 0xf9, + 0xf1, 0x3a, 0x69, 0xb4, 0xc1, 0xc6, 0x76, 0x9c, 0x4d, 0xe1, 0x42, 0x2b, 0xd1, 0x1b, 0xb9, 0x5a, + 0x49, 0xa5, 0xb4, 0x8d, 0x9d, 0x1b, 0x62, 0x96, 0x1c, 0x49, 0xb4, 0x29, 0x92, 0x21, 0x29, 0xdb, + 0x9b, 0x2b, 0x03, 0xbd, 0x2a, 0xd0, 0x07, 0x28, 0x8a, 0xa2, 0x17, 0xb9, 0x09, 0xd0, 0x07, 0x28, + 0xd0, 0xbb, 0x3e, 0x41, 0x81, 0xbc, 0x41, 0x51, 0x14, 0x68, 0xdf, 0xa0, 0xb7, 0xc5, 0xcc, 0x90, + 0x14, 0xa9, 0x1f, 0x7b, 0x1b, 0xc0, 0xc9, 0x95, 0x34, 0xdf, 0xf9, 0xce, 0x99, 0x33, 0x67, 0xce, + 0xcc, 0x9c, 0x19, 0xc2, 0xee, 0xc8, 0xb6, 0x47, 0x26, 0xdd, 0x77, 0x5c, 0xdb, 0xb7, 0xcf, 0xa6, + 0xc3, 0x7d, 0x9d, 0x7a, 0x9a, 0x6b, 0x38, 0xbe, 0xed, 0xd6, 0x38, 0x86, 0x37, 0x04, 0xa3, 0x16, + 0x32, 0xaa, 0x27, 0xb0, 0x79, 0xcf, 0x30, 0x69, 0x33, 0x22, 0xf6, 0xa9, 0x8f, 0xef, 0x40, 0x76, + 0x68, 0x98, 0x54, 0x4a, 0xed, 0x66, 0xf6, 0x4a, 0x07, 0xef, 0xd6, 0xe6, 0x94, 0x6a, 0x49, 0x8d, + 0x1e, 0x83, 0x15, 0xae, 0x51, 0xfd, 0x57, 0x16, 0xb6, 0x96, 0x48, 0x31, 0x86, 0xac, 0x45, 0x26, + 0xcc, 0x62, 0x6a, 0xaf, 0xa8, 0xf0, 0xff, 0x58, 0x82, 0x35, 0x87, 0x68, 0x8f, 0xc9, 0x88, 0x4a, + 0x69, 0x0e, 0x87, 0x4d, 0xfc, 0x36, 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0xce, 0xa5, 0xcc, + 0x6e, 0x66, 0xaf, 0xa8, 0xc4, 0x10, 0xfc, 0x21, 0x6c, 0x3a, 0xd3, 0x33, 0xd3, 0xd0, 0xd4, 0x18, + 0x0d, 0x76, 0x33, 0x7b, 0x39, 0x05, 0x09, 0x41, 0x73, 0x46, 0xbe, 0x0e, 0x1b, 0x4f, 0x29, 0x79, + 0x1c, 0xa7, 0x96, 0x38, 0xb5, 0xc2, 0xe0, 0x18, 0xb1, 0x01, 0xe5, 0x09, 0xf5, 0x3c, 0x32, 0xa2, + 0xaa, 0x7f, 0xee, 0x50, 0x29, 0xcb, 0x47, 0xbf, 0xbb, 0x30, 0xfa, 0xf9, 0x91, 0x97, 0x02, 0xad, + 0xc1, 0xb9, 0x43, 0x71, 0x1d, 0x8a, 0xd4, 0x9a, 0x4e, 0x84, 0x85, 0xdc, 0x8a, 0xf8, 
0xc9, 0xd6, + 0x74, 0x32, 0x6f, 0xa5, 0xc0, 0xd4, 0x02, 0x13, 0x6b, 0x1e, 0x75, 0x9f, 0x18, 0x1a, 0x95, 0xf2, + 0xdc, 0xc0, 0xf5, 0x05, 0x03, 0x7d, 0x21, 0x9f, 0xb7, 0x11, 0xea, 0xe1, 0x06, 0x14, 0xe9, 0x33, + 0x9f, 0x5a, 0x9e, 0x61, 0x5b, 0xd2, 0x1a, 0x37, 0xf2, 0xde, 0x92, 0x59, 0xa4, 0xa6, 0x3e, 0x6f, + 0x62, 0xa6, 0x87, 0x6f, 0xc3, 0x9a, 0xed, 0xf8, 0x86, 0x6d, 0x79, 0x52, 0x61, 0x37, 0xb5, 0x57, + 0x3a, 0x78, 0x73, 0x69, 0x22, 0x74, 0x05, 0x47, 0x09, 0xc9, 0xb8, 0x05, 0xc8, 0xb3, 0xa7, 0xae, + 0x46, 0x55, 0xcd, 0xd6, 0xa9, 0x6a, 0x58, 0x43, 0x5b, 0x2a, 0x72, 0x03, 0x57, 0x17, 0x07, 0xc2, + 0x89, 0x0d, 0x5b, 0xa7, 0x2d, 0x6b, 0x68, 0x2b, 0x15, 0x2f, 0xd1, 0xc6, 0x97, 0x20, 0xef, 0x9d, + 0x5b, 0x3e, 0x79, 0x26, 0x95, 0x79, 0x86, 0x04, 0xad, 0xea, 0x7f, 0x73, 0xb0, 0x71, 0x91, 0x14, + 0xfb, 0x1c, 0x72, 0x43, 0x36, 0x4a, 0x29, 0xfd, 0xff, 0xc4, 0x40, 0xe8, 0x24, 0x83, 0x98, 0xff, + 0x81, 0x41, 0xac, 0x43, 0xc9, 0xa2, 0x9e, 0x4f, 0x75, 0x91, 0x11, 0x99, 0x0b, 0xe6, 0x14, 0x08, + 0xa5, 0xc5, 0x94, 0xca, 0xfe, 0xa0, 0x94, 0x7a, 0x00, 0x1b, 0x91, 0x4b, 0xaa, 0x4b, 0xac, 0x51, + 0x98, 0x9b, 0xfb, 0x2f, 0xf3, 0xa4, 0x26, 0x87, 0x7a, 0x0a, 0x53, 0x53, 0x2a, 0x34, 0xd1, 0xc6, + 0x4d, 0x00, 0xdb, 0xa2, 0xf6, 0x50, 0xd5, 0xa9, 0x66, 0x4a, 0x85, 0x15, 0x51, 0xea, 0x32, 0xca, + 0x42, 0x94, 0x6c, 0x81, 0x6a, 0x26, 0xfe, 0x6c, 0x96, 0x6a, 0x6b, 0x2b, 0x32, 0xe5, 0x44, 0x2c, + 0xb2, 0x85, 0x6c, 0x3b, 0x85, 0x8a, 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x18, 0x59, 0x91, 0x3b, 0x51, + 0x7b, 0xe9, 0xc8, 0x94, 0x40, 0x4d, 0x0c, 0x6c, 0xdd, 0x8d, 0x37, 0xf1, 0x3b, 0x10, 0x01, 0x2a, + 0x4f, 0x2b, 0xe0, 0xbb, 0x50, 0x39, 0x04, 0x3b, 0x64, 0x42, 0x77, 0xee, 0x40, 0x25, 0x19, 0x1e, + 0xbc, 0x0d, 0x39, 0xcf, 0x27, 0xae, 0xcf, 0xb3, 0x30, 0xa7, 0x88, 0x06, 0x46, 0x90, 0xa1, 0x96, + 0xce, 0x77, 0xb9, 0x9c, 0xc2, 0xfe, 0xee, 0x7c, 0x0a, 0xeb, 0x89, 0xee, 0x2f, 0xaa, 0x58, 0xfd, + 0x7d, 0x1e, 0xb6, 0x97, 0xe5, 0xdc, 0xd2, 0xf4, 0xbf, 0x04, 0x79, 0x6b, 0x3a, 0x39, 0xa3, 0xae, + 0x94, 0xe1, 0x16, 0x82, 0x16, 0xae, 0x43, 0xce, 0x24, 0x67, 0xd4, 0x94, 0xb2, 0xbb, 0xa9, 0xbd, + 0xca, 0xc1, 0x87, 0x17, 0xca, 0xea, 0x5a, 0x9b, 0xa9, 0x28, 0x42, 0x13, 0xdf, 0x85, 0x6c, 0xb0, + 0xc5, 0x31, 0x0b, 0x1f, 0x5c, 0xcc, 0x02, 0xcb, 0x45, 0x85, 0xeb, 0xe1, 0x37, 0xa0, 0xc8, 0x7e, + 0x45, 0x6c, 0xf3, 0xdc, 0xe7, 0x02, 0x03, 0x58, 0x5c, 0xf1, 0x0e, 0x14, 0x78, 0x9a, 0xe9, 0x34, + 0x3c, 0x1a, 0xa2, 0x36, 0x9b, 0x18, 0x9d, 0x0e, 0xc9, 0xd4, 0xf4, 0xd5, 0x27, 0xc4, 0x9c, 0x52, + 0x9e, 0x30, 0x45, 0xa5, 0x1c, 0x80, 0xbf, 0x66, 0x18, 0xbe, 0x0a, 0x25, 0x91, 0x95, 0x86, 0xa5, + 0xd3, 0x67, 0x7c, 0xf7, 0xc9, 0x29, 0x22, 0x51, 0x5b, 0x0c, 0x61, 0xdd, 0x3f, 0xf2, 0x6c, 0x2b, + 0x9c, 0x5a, 0xde, 0x05, 0x03, 0x78, 0xf7, 0x9f, 0xce, 0x6f, 0x7c, 0x6f, 0x2d, 0x1f, 0xde, 0x7c, + 0x2e, 0x56, 0xff, 0x92, 0x86, 0x2c, 0x5f, 0x6f, 0x1b, 0x50, 0x1a, 0x3c, 0xec, 0xc9, 0x6a, 0xb3, + 0x7b, 0x7a, 0xd4, 0x96, 0x51, 0x0a, 0x57, 0x00, 0x38, 0x70, 0xaf, 0xdd, 0xad, 0x0f, 0x50, 0x3a, + 0x6a, 0xb7, 0x3a, 0x83, 0xdb, 0x37, 0x51, 0x26, 0x52, 0x38, 0x15, 0x40, 0x36, 0x4e, 0xf8, 0xe4, + 0x00, 0xe5, 0x30, 0x82, 0xb2, 0x30, 0xd0, 0x7a, 0x20, 0x37, 0x6f, 0xdf, 0x44, 0xf9, 0x24, 0xf2, + 0xc9, 0x01, 0x5a, 0xc3, 0xeb, 0x50, 0xe4, 0xc8, 0x51, 0xb7, 0xdb, 0x46, 0x85, 0xc8, 0x66, 0x7f, + 0xa0, 0xb4, 0x3a, 0xc7, 0xa8, 0x18, 0xd9, 0x3c, 0x56, 0xba, 0xa7, 0x3d, 0x04, 0x91, 0x85, 0x13, + 0xb9, 0xdf, 0xaf, 0x1f, 0xcb, 0xa8, 0x14, 0x31, 0x8e, 0x1e, 0x0e, 0xe4, 0x3e, 0x2a, 0x27, 0xdc, + 0xfa, 0xe4, 0x00, 0xad, 0x47, 0x5d, 0xc8, 0x9d, 0xd3, 0x13, 0x54, 0xc1, 0x9b, 0xb0, 0x2e, 0xba, + 0x08, 0x9d, 
0xd8, 0x98, 0x83, 0x6e, 0xdf, 0x44, 0x68, 0xe6, 0x88, 0xb0, 0xb2, 0x99, 0x00, 0x6e, + 0xdf, 0x44, 0xb8, 0xda, 0x80, 0x1c, 0xcf, 0x2e, 0x8c, 0xa1, 0xd2, 0xae, 0x1f, 0xc9, 0x6d, 0xb5, + 0xdb, 0x1b, 0xb4, 0xba, 0x9d, 0x7a, 0x1b, 0xa5, 0x66, 0x98, 0x22, 0xff, 0xea, 0xb4, 0xa5, 0xc8, + 0x4d, 0x94, 0x8e, 0x63, 0x3d, 0xb9, 0x3e, 0x90, 0x9b, 0x28, 0x53, 0xd5, 0x60, 0x7b, 0xd9, 0x3e, + 0xb3, 0x74, 0x65, 0xc4, 0xa6, 0x38, 0xbd, 0x62, 0x8a, 0xb9, 0xad, 0x85, 0x29, 0xfe, 0x36, 0x05, + 0x5b, 0x4b, 0xf6, 0xda, 0xa5, 0x9d, 0xfc, 0x02, 0x72, 0x22, 0x45, 0xc5, 0xe9, 0x73, 0x63, 0xe9, + 0xa6, 0xcd, 0x13, 0x76, 0xe1, 0x04, 0xe2, 0x7a, 0xf1, 0x13, 0x38, 0xb3, 0xe2, 0x04, 0x66, 0x26, + 0x16, 0x9c, 0xfc, 0x4d, 0x0a, 0xa4, 0x55, 0xb6, 0x5f, 0xb2, 0x51, 0xa4, 0x13, 0x1b, 0xc5, 0xe7, + 0xf3, 0x0e, 0x5c, 0x5b, 0x3d, 0x86, 0x05, 0x2f, 0xbe, 0x4b, 0xc1, 0xa5, 0xe5, 0x85, 0xca, 0x52, + 0x1f, 0xee, 0x42, 0x7e, 0x42, 0xfd, 0xb1, 0x1d, 0x1e, 0xd6, 0xef, 0x2f, 0x39, 0x02, 0x98, 0x78, + 0x3e, 0x56, 0x81, 0x56, 0xfc, 0x0c, 0xc9, 0xac, 0xaa, 0x36, 0x84, 0x37, 0x0b, 0x9e, 0xfe, 0x36, + 0x0d, 0xaf, 0x2f, 0x35, 0xbe, 0xd4, 0xd1, 0xb7, 0x00, 0x0c, 0xcb, 0x99, 0xfa, 0xe2, 0x40, 0x16, + 0xfb, 0x53, 0x91, 0x23, 0x7c, 0xed, 0xb3, 0xbd, 0x67, 0xea, 0x47, 0xf2, 0x0c, 0x97, 0x83, 0x80, + 0x38, 0xe1, 0xce, 0xcc, 0xd1, 0x2c, 0x77, 0xf4, 0xed, 0x15, 0x23, 0x5d, 0x38, 0xeb, 0x3e, 0x06, + 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x26, 0x86, 0x35, 0xe2, 0x1b, 0x70, 0xe1, + 0x30, 0x37, 0x24, 0xa6, 0x47, 0x95, 0x0d, 0x21, 0xee, 0x87, 0x52, 0xa6, 0xc1, 0x4f, 0x19, 0x37, + 0xa6, 0x91, 0x4f, 0x68, 0x08, 0x71, 0xa4, 0x51, 0xfd, 0x6b, 0x01, 0x4a, 0xb1, 0xb2, 0x0e, 0x5f, + 0x83, 0xf2, 0x23, 0xf2, 0x84, 0xa8, 0x61, 0xa9, 0x2e, 0x22, 0x51, 0x62, 0x58, 0x2f, 0x28, 0xd7, + 0x3f, 0x86, 0x6d, 0x4e, 0xb1, 0xa7, 0x3e, 0x75, 0x55, 0xcd, 0x24, 0x9e, 0xc7, 0x83, 0x56, 0xe0, + 0x54, 0xcc, 0x64, 0x5d, 0x26, 0x6a, 0x84, 0x12, 0x7c, 0x0b, 0xb6, 0xb8, 0xc6, 0x64, 0x6a, 0xfa, + 0x86, 0x63, 0x52, 0x95, 0x5d, 0x1e, 0x3c, 0xbe, 0x11, 0x47, 0x9e, 0x6d, 0x32, 0xc6, 0x49, 0x40, + 0x60, 0x1e, 0x79, 0xb8, 0x09, 0x6f, 0x71, 0xb5, 0x11, 0xb5, 0xa8, 0x4b, 0x7c, 0xaa, 0xd2, 0xaf, + 0xa7, 0xc4, 0xf4, 0x54, 0x62, 0xe9, 0xea, 0x98, 0x78, 0x63, 0x69, 0x9b, 0x19, 0x38, 0x4a, 0x4b, + 0x29, 0xe5, 0x0a, 0x23, 0x1e, 0x07, 0x3c, 0x99, 0xd3, 0xea, 0x96, 0xfe, 0x05, 0xf1, 0xc6, 0xf8, + 0x10, 0x2e, 0x71, 0x2b, 0x9e, 0xef, 0x1a, 0xd6, 0x48, 0xd5, 0xc6, 0x54, 0x7b, 0xac, 0x4e, 0xfd, + 0xe1, 0x1d, 0xe9, 0x8d, 0x78, 0xff, 0xdc, 0xc3, 0x3e, 0xe7, 0x34, 0x18, 0xe5, 0xd4, 0x1f, 0xde, + 0xc1, 0x7d, 0x28, 0xb3, 0xc9, 0x98, 0x18, 0xdf, 0x50, 0x75, 0x68, 0xbb, 0xfc, 0x64, 0xa9, 0x2c, + 0x59, 0xd9, 0xb1, 0x08, 0xd6, 0xba, 0x81, 0xc2, 0x89, 0xad, 0xd3, 0xc3, 0x5c, 0xbf, 0x27, 0xcb, + 0x4d, 0xa5, 0x14, 0x5a, 0xb9, 0x67, 0xbb, 0x2c, 0xa1, 0x46, 0x76, 0x14, 0xe0, 0x92, 0x48, 0xa8, + 0x91, 0x1d, 0x86, 0xf7, 0x16, 0x6c, 0x69, 0x9a, 0x18, 0xb3, 0xa1, 0xa9, 0x41, 0x89, 0xef, 0x49, + 0x28, 0x11, 0x2c, 0x4d, 0x3b, 0x16, 0x84, 0x20, 0xc7, 0x3d, 0xfc, 0x19, 0xbc, 0x3e, 0x0b, 0x56, + 0x5c, 0x71, 0x73, 0x61, 0x94, 0xf3, 0xaa, 0xb7, 0x60, 0xcb, 0x39, 0x5f, 0x54, 0xc4, 0x89, 0x1e, + 0x9d, 0xf3, 0x79, 0xb5, 0x4f, 0x61, 0xdb, 0x19, 0x3b, 0x8b, 0x7a, 0x5b, 0x71, 0x3d, 0xec, 0x8c, + 0x9d, 0x79, 0xc5, 0xf7, 0xf8, 0x7d, 0xcf, 0xa5, 0x1a, 0xf1, 0xa9, 0x2e, 0x5d, 0x8e, 0xd3, 0x63, + 0x02, 0xbc, 0x0f, 0x48, 0xd3, 0x54, 0x6a, 0x91, 0x33, 0x93, 0xaa, 0xc4, 0xa5, 0x16, 0xf1, 0xa4, + 0xab, 0x71, 0x72, 0x45, 0xd3, 0x64, 0x2e, 0xad, 0x73, 0x21, 0xfe, 0x00, 0x36, 0xed, 0xb3, 0x47, + 0x9a, 0x48, 0x49, 0xd5, 0x71, 0xe9, 
0xd0, 0x78, 0x26, 0xbd, 0xcb, 0xe3, 0xbb, 0xc1, 0x04, 0x3c, + 0x21, 0x7b, 0x1c, 0xc6, 0x37, 0x00, 0x69, 0xde, 0x98, 0xb8, 0x0e, 0xaf, 0x09, 0x3c, 0x87, 0x68, + 0x54, 0x7a, 0x4f, 0x50, 0x05, 0xde, 0x09, 0x61, 0xb6, 0x24, 0xbc, 0xa7, 0xc6, 0xd0, 0x0f, 0x2d, + 0x5e, 0x17, 0x4b, 0x82, 0x63, 0x81, 0xb5, 0x3d, 0x40, 0x2c, 0x14, 0x89, 0x8e, 0xf7, 0x38, 0xad, + 0xe2, 0x8c, 0x9d, 0x78, 0xbf, 0xef, 0xc0, 0x3a, 0x63, 0xce, 0x3a, 0xbd, 0x21, 0xea, 0x19, 0x67, + 0x1c, 0xeb, 0xf1, 0x01, 0x6c, 0x4f, 0x2d, 0xc3, 0xf2, 0xa9, 0xeb, 0xb8, 0x94, 0x5d, 0x26, 0xc4, + 0x8e, 0x20, 0xfd, 0x7b, 0x6d, 0xc5, 0x75, 0xe0, 0x34, 0xce, 0x16, 0x89, 0xa8, 0x6c, 0x4d, 0x17, + 0xc1, 0xea, 0x21, 0x94, 0xe3, 0xf9, 0x89, 0x8b, 0x20, 0x32, 0x14, 0xa5, 0xd8, 0x59, 0xdf, 0xe8, + 0x36, 0xd9, 0x29, 0xfd, 0x95, 0x8c, 0xd2, 0xac, 0x5a, 0x68, 0xb7, 0x06, 0xb2, 0xaa, 0x9c, 0x76, + 0x06, 0xad, 0x13, 0x19, 0x65, 0x3e, 0x28, 0x16, 0xfe, 0xb3, 0x86, 0x9e, 0x3f, 0x7f, 0xfe, 0x3c, + 0x7d, 0x3f, 0x5b, 0x78, 0x1f, 0x5d, 0xaf, 0x7e, 0x9f, 0x86, 0x4a, 0xb2, 0x4e, 0xc7, 0x3f, 0x87, + 0xcb, 0xe1, 0xa5, 0xda, 0xa3, 0xbe, 0xfa, 0xd4, 0x70, 0xf9, 0xc2, 0x99, 0x10, 0x51, 0xe9, 0x46, + 0x53, 0xb7, 0x1d, 0xb0, 0xfa, 0xd4, 0xff, 0xd2, 0x70, 0xd9, 0xb2, 0x98, 0x10, 0x1f, 0xb7, 0xe1, + 0xaa, 0x65, 0xab, 0x9e, 0x4f, 0x2c, 0x9d, 0xb8, 0xba, 0x3a, 0x7b, 0xce, 0x50, 0x89, 0xa6, 0x51, + 0xcf, 0xb3, 0xc5, 0x81, 0x15, 0x59, 0x79, 0xd3, 0xb2, 0xfb, 0x01, 0x79, 0xb6, 0x93, 0xd7, 0x03, + 0xea, 0x5c, 0x9a, 0x65, 0x56, 0xa5, 0xd9, 0x1b, 0x50, 0x9c, 0x10, 0x47, 0xa5, 0x96, 0xef, 0x9e, + 0xf3, 0xea, 0xb2, 0xa0, 0x14, 0x26, 0xc4, 0x91, 0x59, 0xfb, 0xd5, 0xcd, 0x44, 0x32, 0x9a, 0x05, + 0x54, 0xbc, 0x9f, 0x2d, 0x14, 0x11, 0x54, 0xff, 0x99, 0x81, 0x72, 0xbc, 0xda, 0x64, 0xc5, 0xbb, + 0xc6, 0x4f, 0x96, 0x14, 0xdf, 0x7b, 0xde, 0x79, 0x61, 0x6d, 0x5a, 0x6b, 0xb0, 0x23, 0xe7, 0x30, + 0x2f, 0x6a, 0x40, 0x45, 0x68, 0xb2, 0xe3, 0x9e, 0xed, 0x36, 0x54, 0xdc, 0x2c, 0x0a, 0x4a, 0xd0, + 0xc2, 0xc7, 0x90, 0x7f, 0xe4, 0x71, 0xdb, 0x79, 0x6e, 0xfb, 0xdd, 0x17, 0xdb, 0xbe, 0xdf, 0xe7, + 0xc6, 0x8b, 0xf7, 0xfb, 0x6a, 0xa7, 0xab, 0x9c, 0xd4, 0xdb, 0x4a, 0xa0, 0x8e, 0xaf, 0x40, 0xd6, + 0x24, 0xdf, 0x9c, 0x27, 0x0f, 0x27, 0x0e, 0x5d, 0x74, 0x12, 0xae, 0x40, 0xf6, 0x29, 0x25, 0x8f, + 0x93, 0x47, 0x02, 0x87, 0x5e, 0xe1, 0x62, 0xd8, 0x87, 0x1c, 0x8f, 0x17, 0x06, 0x08, 0x22, 0x86, + 0x5e, 0xc3, 0x05, 0xc8, 0x36, 0xba, 0x0a, 0x5b, 0x10, 0x08, 0xca, 0x02, 0x55, 0x7b, 0x2d, 0xb9, + 0x21, 0xa3, 0x74, 0xf5, 0x16, 0xe4, 0x45, 0x10, 0xd8, 0x62, 0x89, 0xc2, 0x80, 0x5e, 0x0b, 0x9a, + 0x81, 0x8d, 0x54, 0x28, 0x3d, 0x3d, 0x39, 0x92, 0x15, 0x94, 0x4e, 0x4e, 0x75, 0x16, 0xe5, 0xaa, + 0x1e, 0x94, 0xe3, 0xe5, 0xe6, 0x8f, 0x92, 0x65, 0xd5, 0xbf, 0xa5, 0xa0, 0x14, 0x2b, 0x1f, 0x59, + 0xe1, 0x42, 0x4c, 0xd3, 0x7e, 0xaa, 0x12, 0xd3, 0x20, 0x5e, 0x90, 0x1a, 0xc0, 0xa1, 0x3a, 0x43, + 0x2e, 0x3a, 0x75, 0x3f, 0xd2, 0x12, 0xc9, 0xa1, 0x7c, 0xf5, 0x4f, 0x29, 0x40, 0xf3, 0x05, 0xe8, + 0x9c, 0x9b, 0xa9, 0x9f, 0xd2, 0xcd, 0xea, 0x1f, 0x53, 0x50, 0x49, 0x56, 0x9d, 0x73, 0xee, 0x5d, + 0xfb, 0x49, 0xdd, 0xfb, 0x47, 0x1a, 0xd6, 0x13, 0xb5, 0xe6, 0x45, 0xbd, 0xfb, 0x1a, 0x36, 0x0d, + 0x9d, 0x4e, 0x1c, 0xdb, 0xa7, 0x96, 0x76, 0xae, 0x9a, 0xf4, 0x09, 0x35, 0xa5, 0x2a, 0xdf, 0x34, + 0xf6, 0x5f, 0x5c, 0xcd, 0xd6, 0x5a, 0x33, 0xbd, 0x36, 0x53, 0x3b, 0xdc, 0x6a, 0x35, 0xe5, 0x93, + 0x5e, 0x77, 0x20, 0x77, 0x1a, 0x0f, 0xd5, 0xd3, 0xce, 0x2f, 0x3b, 0xdd, 0x2f, 0x3b, 0x0a, 0x32, + 0xe6, 0x68, 0xaf, 0x70, 0xd9, 0xf7, 0x00, 0xcd, 0x3b, 0x85, 0x2f, 0xc3, 0x32, 0xb7, 0xd0, 0x6b, + 0x78, 0x0b, 0x36, 0x3a, 0x5d, 0xb5, 0xdf, 0x6a, 0xca, 0xaa, 
0x7c, 0xef, 0x9e, 0xdc, 0x18, 0xf4, + 0xc5, 0xf5, 0x3e, 0x62, 0x0f, 0x12, 0x0b, 0xbc, 0xfa, 0x87, 0x0c, 0x6c, 0x2d, 0xf1, 0x04, 0xd7, + 0x83, 0x9b, 0x85, 0xb8, 0xec, 0x7c, 0x74, 0x11, 0xef, 0x6b, 0xac, 0x20, 0xe8, 0x11, 0xd7, 0x0f, + 0x2e, 0x22, 0x37, 0x80, 0x45, 0xc9, 0xf2, 0x8d, 0xa1, 0x41, 0xdd, 0xe0, 0x35, 0x44, 0x5c, 0x37, + 0x36, 0x66, 0xb8, 0x78, 0x10, 0xf9, 0x19, 0x60, 0xc7, 0xf6, 0x0c, 0xdf, 0x78, 0x42, 0x55, 0xc3, + 0x0a, 0x9f, 0x4e, 0xd8, 0xf5, 0x23, 0xab, 0xa0, 0x50, 0xd2, 0xb2, 0xfc, 0x88, 0x6d, 0xd1, 0x11, + 0x99, 0x63, 0xb3, 0xcd, 0x3c, 0xa3, 0xa0, 0x50, 0x12, 0xb1, 0xaf, 0x41, 0x59, 0xb7, 0xa7, 0xac, + 0x26, 0x13, 0x3c, 0x76, 0x76, 0xa4, 0x94, 0x92, 0xc0, 0x22, 0x4a, 0x50, 0x6d, 0xcf, 0xde, 0x6c, + 0xca, 0x4a, 0x49, 0x60, 0x82, 0x72, 0x1d, 0x36, 0xc8, 0x68, 0xe4, 0x32, 0xe3, 0xa1, 0x21, 0x71, + 0x7f, 0xa8, 0x44, 0x30, 0x27, 0xee, 0xdc, 0x87, 0x42, 0x18, 0x07, 0x76, 0x54, 0xb3, 0x48, 0xa8, + 0x8e, 0x78, 0x39, 0x4b, 0xef, 0x15, 0x95, 0x82, 0x15, 0x0a, 0xaf, 0x41, 0xd9, 0xf0, 0xd4, 0xd9, + 0x13, 0x6e, 0x7a, 0x37, 0xbd, 0x57, 0x50, 0x4a, 0x86, 0x17, 0xbd, 0xd9, 0x55, 0xbf, 0x4b, 0x43, + 0x25, 0xf9, 0x04, 0x8d, 0x9b, 0x50, 0x30, 0x6d, 0x8d, 0xf0, 0xd4, 0x12, 0xdf, 0x3f, 0xf6, 0x5e, + 0xf2, 0x6a, 0x5d, 0x6b, 0x07, 0x7c, 0x25, 0xd2, 0xdc, 0xf9, 0x7b, 0x0a, 0x0a, 0x21, 0x8c, 0x2f, + 0x41, 0xd6, 0x21, 0xfe, 0x98, 0x9b, 0xcb, 0x1d, 0xa5, 0x51, 0x4a, 0xe1, 0x6d, 0x86, 0x7b, 0x0e, + 0xb1, 0x78, 0x0a, 0x04, 0x38, 0x6b, 0xb3, 0x79, 0x35, 0x29, 0xd1, 0xf9, 0xe5, 0xc4, 0x9e, 0x4c, + 0xa8, 0xe5, 0x7b, 0xe1, 0xbc, 0x06, 0x78, 0x23, 0x80, 0xf1, 0x87, 0xb0, 0xe9, 0xbb, 0xc4, 0x30, + 0x13, 0xdc, 0x2c, 0xe7, 0xa2, 0x50, 0x10, 0x91, 0x0f, 0xe1, 0x4a, 0x68, 0x57, 0xa7, 0x3e, 0xd1, + 0xc6, 0x54, 0x9f, 0x29, 0xe5, 0xf9, 0xfb, 0xe6, 0xe5, 0x80, 0xd0, 0x0c, 0xe4, 0xa1, 0x6e, 0xf5, + 0xfb, 0x14, 0x6c, 0x86, 0xd7, 0x29, 0x3d, 0x0a, 0xd6, 0x09, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, + 0x6b, 0x31, 0x95, 0x17, 0xf4, 0x6a, 0xf5, 0x48, 0x49, 0x89, 0x19, 0xd8, 0x99, 0x00, 0xcc, 0x24, + 0x2b, 0xc3, 0x76, 0x15, 0x4a, 0xc1, 0xf7, 0x05, 0xfe, 0x91, 0x4a, 0x5c, 0xc0, 0x41, 0x40, 0xec, + 0xde, 0x85, 0xb7, 0x21, 0x77, 0x46, 0x47, 0x86, 0x15, 0xbc, 0x7a, 0x8a, 0x46, 0xf8, 0x96, 0x9a, + 0x8d, 0xde, 0x52, 0x8f, 0x7e, 0x97, 0x82, 0x2d, 0xcd, 0x9e, 0xcc, 0xfb, 0x7b, 0x84, 0xe6, 0x5e, + 0x01, 0xbc, 0x2f, 0x52, 0x5f, 0xdd, 0x1d, 0x19, 0xfe, 0x78, 0x7a, 0x56, 0xd3, 0xec, 0xc9, 0xfe, + 0xc8, 0x36, 0x89, 0x35, 0x9a, 0x7d, 0x65, 0xe3, 0x7f, 0xb4, 0x8f, 0x46, 0xd4, 0xfa, 0x68, 0x64, + 0xc7, 0xbe, 0xb9, 0x7d, 0x3e, 0xfb, 0xfb, 0x6d, 0x3a, 0x73, 0xdc, 0x3b, 0xfa, 0x73, 0x7a, 0xe7, + 0x58, 0xf4, 0xd5, 0x0b, 0x63, 0xa3, 0xd0, 0xa1, 0x49, 0x35, 0x36, 0xde, 0xff, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xa2, 0xc3, 0x4e, 0x18, 0xbe, 0x1b, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/google.golang.org/genproto/protobuf/descriptor.proto b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto similarity index 94% rename from src/stackdriver-nozzle/vendor/google.golang.org/genproto/protobuf/descriptor.proto rename to src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto index 28410d4a..70b82a4d 100644 --- a/src/stackdriver-nozzle/vendor/google.golang.org/genproto/protobuf/descriptor.proto +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/protoc-gen-go/descriptor/descriptor.proto @@ -40,12 +40,11 @@ syntax = "proto2"; package google.protobuf; -option go_package = "descriptor"; +option go_package = 
"github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor"; option java_package = "com.google.protobuf"; option java_outer_classname = "DescriptorProtos"; option csharp_namespace = "Google.Protobuf.Reflection"; option objc_class_prefix = "GPB"; -option java_generate_equals_and_hash = true; // descriptor.proto must be optimized for speed because reflection-based // algorithms don't work during bootstrapping. @@ -140,7 +139,11 @@ message FieldDescriptorProto { TYPE_FIXED32 = 7; TYPE_BOOL = 8; TYPE_STRING = 9; - TYPE_GROUP = 10; // Tag-delimited aggregate. + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. + TYPE_GROUP = 10; TYPE_MESSAGE = 11; // Length-delimited aggregate. // New in version 2. @@ -158,7 +161,6 @@ message FieldDescriptorProto { LABEL_OPTIONAL = 1; LABEL_REQUIRED = 2; LABEL_REPEATED = 3; - // TODO(sanjay): Should we add LABEL_MAP? }; optional string name = 1; @@ -306,19 +308,8 @@ message FileOptions { // top-level extensions defined in the file. optional bool java_multiple_files = 10 [default=false]; - // If set true, then the Java code generator will generate equals() and - // hashCode() methods for all messages defined in the .proto file. - // This increases generated code size, potentially substantially for large - // protos, which may harm a memory-constrained application. - // - In the full runtime this is a speed optimization, as the - // AbstractMessage base class includes reflection-based implementations of - // these methods. - // - In the lite runtime, setting this option changes the semantics of - // equals() and hashCode() to more closely match those of the full runtime; - // the generated methods compute their results based on field values rather - // than object identity. (Implementations should not assume that hashcodes - // will be consistent across runtimes or versions of the protocol compiler.) - optional bool java_generate_equals_and_hash = 20 [default=false]; + // This option does nothing. + optional bool java_generate_equals_and_hash = 20 [deprecated=true]; // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 @@ -360,6 +351,7 @@ message FileOptions { optional bool cc_generic_services = 16 [default=false]; optional bool java_generic_services = 17 [default=false]; optional bool py_generic_services = 18 [default=false]; + optional bool php_generic_services = 19 [default=false]; // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations @@ -379,6 +371,21 @@ message FileOptions { // Namespace for generated classes; defaults to the package. optional string csharp_namespace = 37; + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. + optional string swift_prefix = 39; + + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + optional string php_class_prefix = 40; + + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. 
+ optional string php_namespace = 41; + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -443,6 +450,9 @@ message MessageOptions { // parser. optional bool map_entry = 7; + reserved 8; // javalite_serializable + reserved 9; // javanano_as_lite + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -471,7 +481,6 @@ message FieldOptions { // false will avoid using packed encoding. optional bool packed = 2; - // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types // (int64, uint64, sint64, fixed64, sfixed64). By default these types are @@ -512,7 +521,7 @@ message FieldOptions { // // // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outher message + // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy @@ -538,6 +547,8 @@ message FieldOptions { // Clients can define custom options in extensions of this message. See above. extensions 1000 to max; + + reserved 4; // removed jtype } message OneofOptions { @@ -560,6 +571,8 @@ message EnumOptions { // is a formalization for deprecating enums. optional bool deprecated = 3 [default=false]; + reserved 5; // javanano_as_lite + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; @@ -614,6 +627,17 @@ message MethodOptions { // this is a formalization for deprecating methods. optional bool deprecated = 33 [default=false]; + // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, + // or neither? HTTP based RPC implementation may choose GET verb for safe + // methods, and PUT verb for idempotent methods instead of the default POST. + enum IdempotencyLevel { + IDEMPOTENCY_UNKNOWN = 0; + NO_SIDE_EFFECTS = 1; // implies idempotent + IDEMPOTENT = 2; // idempotent, but may have side effects + } + optional IdempotencyLevel idempotency_level = + 34 [default=IDEMPOTENCY_UNKNOWN]; + // The parser stores options it doesn't recognize here. See above. repeated UninterpretedOption uninterpreted_option = 999; diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any.go index 89e07ae1..b2af97f4 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any.go @@ -51,6 +51,9 @@ const googleApis = "type.googleapis.com/" // function. AnyMessageName is provided for less common use cases like filtering a // sequence of Any messages based on a set of allowed message type names. 
func AnyMessageName(any *any.Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } slash := strings.LastIndex(any.TypeUrl, "/") if slash < 0 { return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go index f2c6906b..6c9a6cf7 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any/any.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/any/any.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/any.proto /* Package any is a generated protocol buffer package. It is generated from these files: - github.com/golang/protobuf/ptypes/any/any.proto + google/protobuf/any.proto It has these top-level messages: Any @@ -132,24 +131,38 @@ func (*Any) ProtoMessage() {} func (*Any) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Any) XXX_WellKnownType() string { return "Any" } +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + func init() { proto.RegisterType((*Any)(nil), "google.protobuf.Any") } -func init() { proto.RegisterFile("github.com/golang/protobuf/ptypes/any/any.proto", fileDescriptor0) } +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 187 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xd2, 0x4f, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcc, - 0xab, 0x04, 0x61, 0x3d, 0xb0, 0xb8, 0x10, 0x7f, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x1e, 0x4c, - 0x95, 0x92, 0x19, 0x17, 0xb3, 0x63, 0x5e, 0xa5, 0x90, 0x24, 0x17, 0x07, 0x48, 0x79, 0x7c, 0x69, - 0x51, 0x8e, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x67, 0x10, 0x3b, 0x88, 0x1f, 0x5a, 0x94, 0x23, 0x24, - 0xc2, 0xc5, 0x5a, 0x96, 0x98, 0x53, 0x9a, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x13, 0x04, 0xe1, - 0x38, 0x15, 0x71, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe7, 0xc4, 0xe1, 0x98, 0x57, 0x19, - 0x00, 0xe2, 0x04, 0x30, 0x46, 0xa9, 0x12, 0xe5, 0xb8, 0x05, 0x8c, 0x8c, 0x8b, 0x98, 0x98, 0xdd, - 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x4c, 0x0b, 0x80, 0xaa, 0xd2, 0x0b, 0x4f, 0xcd, 0xc9, - 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4e, 0x62, 0x03, 0x6b, 0x37, 0x06, 0x04, 0x00, - 0x00, 0xff, 0xff, 0xc6, 0x4d, 0x03, 0x23, 0xf6, 0x00, 0x00, 0x00, + // 185 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 
0xca, 0xe7, 0x12, 0x4e, 0xce, + 0xcf, 0xd5, 0x43, 0x33, 0xce, 0x89, 0xc3, 0x31, 0xaf, 0x32, 0x00, 0xc4, 0x09, 0x60, 0x8c, 0x52, + 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, + 0x4b, 0x47, 0xb8, 0xa8, 0x00, 0x64, 0x7a, 0x31, 0xc8, 0x61, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, + 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x8c, 0x0a, 0x80, 0x2a, 0xd1, 0x0b, 0x4f, 0xcd, 0xc9, 0xf1, 0xce, + 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0x29, 0x4d, 0x62, 0x03, 0xeb, 0x35, 0x06, 0x04, 0x00, 0x00, 0xff, + 0xff, 0x13, 0xf8, 0xe8, 0x42, 0xdd, 0x00, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any/any.proto b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any/any.proto index 81dcf46c..9bd3f50a 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any/any.proto +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/any/any.proto @@ -37,7 +37,6 @@ option go_package = "github.com/golang/protobuf/ptypes/any"; option java_package = "com.google.protobuf"; option java_outer_classname = "AnyProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // `Any` contains an arbitrary serialized protocol buffer message along with a diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go index 56974834..b2410a09 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/duration/duration.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/duration/duration.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/duration.proto /* Package duration is a generated protocol buffer package. It is generated from these files: - github.com/golang/protobuf/ptypes/duration/duration.proto + google/protobuf/duration.proto It has these top-level messages: Duration @@ -35,6 +34,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // two Timestamp values is a Duration and it can be added or subtracted // from a Timestamp. Range is approximately +-10,000 years. // +// # Examples +// // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; @@ -69,10 +70,27 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // end.nanos -= 1000000000; // } // +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// // type Duration struct { // Signed seconds of the span of time. 
Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Signed fractions of a second at nanosecond resolution of the span // of time. Durations less than one second are represented with a 0 @@ -89,26 +107,38 @@ func (*Duration) ProtoMessage() {} func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Duration) XXX_WellKnownType() string { return "Duration" } -func init() { - proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 } func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/duration/duration.proto", fileDescriptor0) + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") } +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptor0) } + var fileDescriptor0 = []byte{ - // 189 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0x29, - 0x2d, 0x4a, 0x2c, 0xc9, 0xcc, 0xcf, 0x83, 0x33, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, - 0xd3, 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0xac, 0xb8, 0x38, 0x5c, 0xa0, 0x4a, 0x84, 0x24, 0xb8, - 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, 0x60, - 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xd6, - 0x20, 0x08, 0xc7, 0xa9, 0x86, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x48, 0x27, 0x5e, 0x98, - 0x81, 0x01, 0x20, 0x91, 0x00, 0xc6, 0x28, 0x2d, 0xe2, 0xdd, 0xbb, 0x80, 0x91, 0x71, 0x11, 0x13, - 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xb9, 0x01, 0x50, 0xa5, 0x7a, 0xe1, 0xa9, - 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x2d, 0x49, 0x6c, 0x60, 0x33, 0x8c, 0x01, - 0x01, 0x00, 0x00, 0xff, 0xff, 0x62, 0xfb, 0xb1, 0x51, 0x0e, 0x01, 0x00, 0x00, + // 190 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0xc3, 0x25, 0x9c, 0x9c, + 0x9f, 0xab, 0x87, 0x66, 0xa4, 0x13, 0x2f, 0xcc, 0xc0, 0x00, 0x90, 0x48, 0x00, 0x63, 0x94, 0x56, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x4e, 0x62, 0x5e, + 0x3a, 0xc2, 0x7d, 0x05, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x70, 0x67, 0xfe, 0x60, 0x64, 0x5c, 0xc4, + 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, 0x00, 0x54, 0xa9, 0x5e, 0x78, + 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, 0x12, 0x1b, 0xd8, 0x0c, 0x63, + 0x40, 0x00, 
0x00, 0x00, 0xff, 0xff, 0xdc, 0x84, 0x30, 0xff, 0xf3, 0x00, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto index 96c1796d..975fce41 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/duration/duration.proto @@ -33,11 +33,11 @@ syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; option go_package = "github.com/golang/protobuf/ptypes/duration"; option java_package = "com.google.protobuf"; option java_outer_classname = "DurationProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // A Duration represents a signed, fixed-length span of time represented @@ -47,6 +47,8 @@ option objc_class_prefix = "GPB"; // two Timestamp values is a Duration and it can be added or subtracted // from a Timestamp. Range is approximately +-10,000 years. // +// # Examples +// // Example 1: Compute Duration from two Timestamps in pseudo code. // // Timestamp start = ...; @@ -81,11 +83,28 @@ option objc_class_prefix = "GPB"; // end.nanos -= 1000000000; // } // +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// // message Duration { // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years int64 seconds = 1; // Signed fractions of a second at nanosecond resolution of the span diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go index 46c765a9..e877b72c 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/empty/empty.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/empty/empty.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/empty.proto /* Package empty is a generated protocol buffer package. 
It is generated from these files: - github.com/golang/protobuf/ptypes/empty/empty.proto + google/protobuf/empty.proto It has these top-level messages: Empty @@ -50,20 +49,18 @@ func init() { proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") } -func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/empty/empty.proto", fileDescriptor0) -} +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 150 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x32, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x4f, 0xcd, - 0x2d, 0x28, 0xa9, 0x84, 0x90, 0x7a, 0x60, 0x39, 0x21, 0xfe, 0xf4, 0xfc, 0xfc, 0xf4, 0x9c, 0x54, - 0x3d, 0x98, 0x4a, 0x25, 0x76, 0x2e, 0x56, 0x57, 0x90, 0xbc, 0x53, 0x25, 0x97, 0x70, 0x72, 0x7e, - 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x27, 0xd2, - 0xce, 0x05, 0x8c, 0x8c, 0x3f, 0x18, 0x19, 0x17, 0x31, 0x31, 0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, - 0x73, 0x87, 0x18, 0x1a, 0x00, 0x55, 0xaa, 0x17, 0x9e, 0x9a, 0x93, 0xe3, 0x9d, 0x97, 0x5f, 0x9e, - 0x17, 0x02, 0xd2, 0x92, 0xc4, 0x06, 0x36, 0xc3, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x7f, 0xbb, - 0xf4, 0x0e, 0xd2, 0x00, 0x00, 0x00, + // 148 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x19, 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0xbc, 0x13, 0x17, 0x58, 0x36, + 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x52, 0x4f, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0x47, 0x58, 0x53, 0x50, 0x52, 0x59, 0x90, 0x5a, 0x0c, + 0xb1, 0xed, 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, + 0x13, 0x03, 0xa0, 0xea, 0xf4, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, + 0xea, 0x93, 0xd8, 0xc0, 0x06, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x64, 0xd4, 0xb3, 0xa6, + 0xb7, 0x00, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto index 37f4cd10..03cacd23 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/empty/empty.proto @@ -37,7 +37,6 @@ option go_package = "github.com/golang/protobuf/ptypes/empty"; option java_package = "com.google.protobuf"; option java_outer_classname = "EmptyProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; option cc_enable_arenas = true; diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/regen.sh b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/regen.sh index 2a5b4e8b..b50a9414 100755 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/regen.sh +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/regen.sh @@ -8,14 
+8,7 @@ PKG=github.com/golang/protobuf/ptypes UPSTREAM=https://github.com/google/protobuf UPSTREAM_SUBDIR=src/google/protobuf -PROTO_FILES=' - any.proto - duration.proto - empty.proto - struct.proto - timestamp.proto - wrappers.proto -' +PROTO_FILES=(any duration empty struct timestamp wrappers) function die() { echo 1>&2 $* @@ -36,31 +29,15 @@ pkgdir=$(go list -f '{{.Dir}}' $PKG) echo 1>&2 $pkgdir base=$(echo $pkgdir | sed "s,/$PKG\$,,") echo 1>&2 "base: $base" -cd $base +cd "$base" echo 1>&2 "fetching latest protos... " git clone -q $UPSTREAM $tmpdir -# Pass 1: build mapping from upstream filename to our filename. -declare -A filename_map -for f in $(cd $PKG && find * -name '*.proto'); do - echo -n 1>&2 "looking for latest version of $f... " - up=$(cd $tmpdir/$UPSTREAM_SUBDIR && find * -name $(basename $f) | grep -v /testdata/) - echo 1>&2 $up - if [ $(echo $up | wc -w) != "1" ]; then - die "not exactly one match" - fi - filename_map[$up]=$f -done -# Pass 2: copy files -for up in "${!filename_map[@]}"; do - f=${filename_map[$up]} - shortname=$(basename $f | sed 's,\.proto$,,') - cp $tmpdir/$UPSTREAM_SUBDIR/$up $PKG/$f -done -# Run protoc once per package. -for dir in $(find $PKG -name '*.proto' | xargs dirname | sort | uniq); do - echo 1>&2 "* $dir" - protoc --go_out=. $dir/*.proto +for file in ${PROTO_FILES[@]}; do + echo 1>&2 "* $file" + protoc --go_out=. -I$tmpdir/src $tmpdir/src/google/protobuf/$file.proto || die + cp $tmpdir/src/google/protobuf/$file.proto $PKG/$file done + echo 1>&2 "All OK" diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go index 197042ed..4cfe6081 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/struct/struct.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/struct/struct.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/struct.proto /* Package structpb is a generated protocol buffer package. 
It is generated from these files: - github.com/golang/protobuf/ptypes/struct/struct.proto + google/protobuf/struct.proto It has these top-level messages: Struct @@ -347,36 +346,35 @@ func init() { proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) } -func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/struct/struct.proto", fileDescriptor0) -} +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 416 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x8c, 0x92, 0x41, 0x8b, 0xd3, 0x40, - 0x14, 0x80, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa0, 0xa1, 0x7b, 0x09, - 0x22, 0x09, 0x56, 0x04, 0x31, 0x5e, 0x0c, 0xac, 0xbb, 0x60, 0x58, 0x62, 0x74, 0x57, 0xf0, 0x52, - 0x9a, 0x34, 0x8d, 0xa1, 0xd3, 0x99, 0x90, 0xcc, 0x28, 0x3d, 0xfa, 0x2f, 0x3c, 0x8a, 0x47, 0x8f, - 0xfe, 0x42, 0x99, 0x99, 0x24, 0x4a, 0x4b, 0xc1, 0xd3, 0xf4, 0xbd, 0xf9, 0xde, 0x37, 0xef, 0xbd, - 0x06, 0x9e, 0x97, 0x15, 0xff, 0x2c, 0x32, 0x3f, 0x67, 0x9b, 0xa0, 0x64, 0x64, 0x41, 0xcb, 0xa0, - 0x6e, 0x18, 0x67, 0x99, 0x58, 0x05, 0x35, 0xdf, 0xd6, 0x45, 0x1b, 0xb4, 0xbc, 0x11, 0x39, 0xef, - 0x0e, 0x5f, 0xdd, 0xe2, 0x3b, 0x25, 0x63, 0x25, 0x29, 0xfc, 0x9e, 0x9d, 0x7e, 0x47, 0x60, 0xbd, - 0x57, 0x04, 0x0e, 0xc1, 0x5a, 0x55, 0x05, 0x59, 0xb6, 0x13, 0xe4, 0x9a, 0x9e, 0x33, 0x3b, 0xf3, - 0x77, 0x60, 0x5f, 0x83, 0xfe, 0x1b, 0x45, 0x9d, 0x53, 0xde, 0x6c, 0xd3, 0xae, 0xe4, 0xf4, 0x1d, - 0x38, 0xff, 0xa4, 0xf1, 0x09, 0x98, 0xeb, 0x62, 0x3b, 0x41, 0x2e, 0xf2, 0xec, 0x54, 0xfe, 0xc4, - 0x4f, 0x60, 0xfc, 0x65, 0x41, 0x44, 0x31, 0x31, 0x5c, 0xe4, 0x39, 0xb3, 0x7b, 0x7b, 0xf2, 0x1b, - 0x79, 0x9b, 0x6a, 0xe8, 0xa5, 0xf1, 0x02, 0x4d, 0x7f, 0x1b, 0x30, 0x56, 0x49, 0x1c, 0x02, 0x50, - 0x41, 0xc8, 0x5c, 0x0b, 0xa4, 0xf4, 0x78, 0x76, 0xba, 0x27, 0xb8, 0x12, 0x84, 0x28, 0xfe, 0x72, - 0x94, 0xda, 0xb4, 0x0f, 0xf0, 0x19, 0xdc, 0xa6, 0x62, 0x93, 0x15, 0xcd, 0xfc, 0xef, 0xfb, 0xe8, - 0x72, 0x94, 0x3a, 0x3a, 0x3b, 0x40, 0x2d, 0x6f, 0x2a, 0x5a, 0x76, 0x90, 0x29, 0x1b, 0x97, 0x90, - 0xce, 0x6a, 0xe8, 0x11, 0x40, 0xc6, 0x58, 0xdf, 0xc6, 0x91, 0x8b, 0xbc, 0x5b, 0xf2, 0x29, 0x99, - 0xd3, 0xc0, 0x2b, 0x65, 0x11, 0x39, 0xef, 0x90, 0xb1, 0x1a, 0xf5, 0xfe, 0x81, 0x3d, 0x76, 0x7a, - 0x91, 0xf3, 0x61, 0x4a, 0x52, 0xb5, 0x7d, 0xad, 0xa5, 0x6a, 0xf7, 0xa7, 0x8c, 0xab, 0x96, 0x0f, - 0x53, 0x92, 0x3e, 0x88, 0x2c, 0x38, 0x5a, 0x57, 0x74, 0x39, 0x0d, 0xc1, 0x1e, 0x08, 0xec, 0x83, - 0xa5, 0x64, 0xfd, 0x3f, 0x7a, 0x68, 0xe9, 0x1d, 0xf5, 0xf8, 0x01, 0xd8, 0xc3, 0x12, 0xf1, 0x31, - 0xc0, 0xd5, 0x75, 0x1c, 0xcf, 0x6f, 0x5e, 0xc7, 0xd7, 0xe7, 0x27, 0xa3, 0xe8, 0x1b, 0x82, 0xbb, - 0x39, 0xdb, 0xec, 0x2a, 0x22, 0x47, 0x4f, 0x93, 0xc8, 0x38, 0x41, 0x9f, 0x9e, 0xfe, 0xef, 0x87, - 0x19, 0xea, 0xa3, 0xce, 0x7e, 0x20, 0xf4, 0xd3, 0x30, 0x2f, 0x92, 0xe8, 0x97, 0xf1, 0xf0, 0x42, - 0xcb, 0x93, 0xbe, 0xbf, 0x8f, 0x05, 0x21, 0x6f, 0x29, 0xfb, 0x4a, 0x3f, 0xc8, 0xca, 0xcc, 0x52, - 0xaa, 0x67, 0x7f, 0x02, 0x00, 0x00, 0xff, 0xff, 0xbc, 0xcf, 0x6d, 0x50, 0xfe, 0x02, 0x00, 0x00, + // 417 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0x41, 0x8b, 0xd3, 0x40, + 0x14, 0xc7, 0x3b, 0xc9, 0x36, 0x98, 0x17, 0x59, 0x97, 0x11, 0xb4, 0xac, 0xa2, 0xa1, 0x7b, 0x09, + 0x22, 0x29, 0xd6, 0x8b, 0x18, 0x2f, 0x06, 0xd6, 0x5d, 0x30, 0x2c, 0x31, 0xba, 0x15, 0xbc, 0x94, + 0x26, 0x4d, 0x63, 0xe8, 0x74, 0x26, 0x24, 0x33, 0x4a, 0x8f, 0x7e, 0x0b, 0xcf, 0x1e, 0x3d, 
0xfa, + 0xe9, 0x3c, 0xca, 0xcc, 0x24, 0xa9, 0xb4, 0xf4, 0x94, 0xbc, 0xf7, 0x7e, 0xef, 0x3f, 0xef, 0xff, + 0x66, 0xe0, 0x71, 0xc1, 0x58, 0x41, 0xf2, 0x49, 0x55, 0x33, 0xce, 0x52, 0xb1, 0x9a, 0x34, 0xbc, + 0x16, 0x19, 0xf7, 0x55, 0x8c, 0xef, 0xe9, 0xaa, 0xdf, 0x55, 0xc7, 0x3f, 0x11, 0x58, 0x1f, 0x15, + 0x81, 0x03, 0xb0, 0x56, 0x65, 0x4e, 0x96, 0xcd, 0x08, 0xb9, 0xa6, 0xe7, 0x4c, 0x2f, 0xfc, 0x3d, + 0xd8, 0xd7, 0xa0, 0xff, 0x4e, 0x51, 0x97, 0x94, 0xd7, 0xdb, 0xa4, 0x6d, 0x39, 0xff, 0x00, 0xce, + 0x7f, 0x69, 0x7c, 0x06, 0xe6, 0x3a, 0xdf, 0x8e, 0x90, 0x8b, 0x3c, 0x3b, 0x91, 0xbf, 0xf8, 0x39, + 0x0c, 0xbf, 0x2d, 0x88, 0xc8, 0x47, 0x86, 0x8b, 0x3c, 0x67, 0xfa, 0xe0, 0x40, 0x7c, 0x26, 0xab, + 0x89, 0x86, 0x5e, 0x1b, 0xaf, 0xd0, 0xf8, 0x8f, 0x01, 0x43, 0x95, 0xc4, 0x01, 0x00, 0x15, 0x84, + 0xcc, 0xb5, 0x80, 0x14, 0x3d, 0x9d, 0x9e, 0x1f, 0x08, 0xdc, 0x08, 0x42, 0x14, 0x7f, 0x3d, 0x48, + 0x6c, 0xda, 0x05, 0xf8, 0x02, 0xee, 0x52, 0xb1, 0x49, 0xf3, 0x7a, 0xbe, 0x3b, 0x1f, 0x5d, 0x0f, + 0x12, 0x47, 0x67, 0x7b, 0xa8, 0xe1, 0x75, 0x49, 0x8b, 0x16, 0x32, 0xe5, 0xe0, 0x12, 0xd2, 0x59, + 0x0d, 0x3d, 0x05, 0x48, 0x19, 0xeb, 0xc6, 0x38, 0x71, 0x91, 0x77, 0x47, 0x1e, 0x25, 0x73, 0x1a, + 0x78, 0xa3, 0x54, 0x44, 0xc6, 0x5b, 0x64, 0xa8, 0xac, 0x3e, 0x3c, 0xb2, 0xc7, 0x56, 0x5e, 0x64, + 0xbc, 0x77, 0x49, 0xca, 0xa6, 0xeb, 0xb5, 0x54, 0xef, 0xa1, 0xcb, 0xa8, 0x6c, 0x78, 0xef, 0x92, + 0x74, 0x41, 0x68, 0xc1, 0xc9, 0xba, 0xa4, 0xcb, 0x71, 0x00, 0x76, 0x4f, 0x60, 0x1f, 0x2c, 0x25, + 0xd6, 0xdd, 0xe8, 0xb1, 0xa5, 0xb7, 0xd4, 0xb3, 0x47, 0x60, 0xf7, 0x4b, 0xc4, 0xa7, 0x00, 0x37, + 0xb7, 0x51, 0x34, 0x9f, 0xbd, 0x8d, 0x6e, 0x2f, 0xcf, 0x06, 0xe1, 0x0f, 0x04, 0xf7, 0x33, 0xb6, + 0xd9, 0x97, 0x08, 0x1d, 0xed, 0x26, 0x96, 0x71, 0x8c, 0xbe, 0xbc, 0x28, 0x4a, 0xfe, 0x55, 0xa4, + 0x7e, 0xc6, 0x36, 0x93, 0x82, 0x91, 0x05, 0x2d, 0x76, 0x4f, 0xb1, 0xe2, 0xdb, 0x2a, 0x6f, 0xda, + 0x17, 0x19, 0xe8, 0x4f, 0x95, 0xfe, 0x45, 0xe8, 0x97, 0x61, 0x5e, 0xc5, 0xe1, 0x6f, 0xe3, 0xc9, + 0x95, 0x16, 0x8f, 0xbb, 0xf9, 0x3e, 0xe7, 0x84, 0xbc, 0xa7, 0xec, 0x3b, 0xfd, 0x24, 0x3b, 0x53, + 0x4b, 0x49, 0xbd, 0xfc, 0x17, 0x00, 0x00, 0xff, 0xff, 0xe8, 0x1b, 0x59, 0xf8, 0xe5, 0x02, 0x00, + 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto index beeba811..7d7808e7 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/struct/struct.proto @@ -33,11 +33,11 @@ syntax = "proto3"; package google.protobuf; option csharp_namespace = "Google.Protobuf.WellKnownTypes"; +option cc_enable_arenas = true; option go_package = "github.com/golang/protobuf/ptypes/struct;structpb"; option java_package = "com.google.protobuf"; option java_outer_classname = "StructProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp.go index 1b365762..47f10dbc 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp.go @@ -99,6 +99,15 @@ func Timestamp(ts *tspb.Timestamp) (time.Time, error) { return t, validateTimestamp(ts) } +// 
TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *tspb.Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + // TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. // It returns an error if the resulting Timestamp is invalid. func TimestampProto(t time.Time) (*tspb.Timestamp, error) { diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go index ffcc5159..e23e4a25 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go @@ -1,12 +1,11 @@ -// Code generated by protoc-gen-go. -// source: github.com/golang/protobuf/ptypes/timestamp/timestamp.proto -// DO NOT EDIT! +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/protobuf/timestamp.proto /* Package timestamp is a generated protocol buffer package. It is generated from these files: - github.com/golang/protobuf/ptypes/timestamp/timestamp.proto + google/protobuf/timestamp.proto It has these top-level messages: Timestamp @@ -40,6 +39,8 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // +// # Examples +// // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; @@ -77,15 +78,36 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package // // Example 5: Compute Timestamp from current time in Python. // -// now = time.time() -// seconds = int(now) -// nanos = int((now - seconds) * 10**9) -// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. 
// // type Timestamp struct { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. Seconds int64 `protobuf:"varint,1,opt,name=seconds" json:"seconds,omitempty"` // Non-negative fractions of a second at nanosecond resolution. Negative @@ -101,27 +123,38 @@ func (*Timestamp) ProtoMessage() {} func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } -func init() { - proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 } func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/timestamp/timestamp.proto", fileDescriptor0) + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") } +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptor0) } + var fileDescriptor0 = []byte{ - // 194 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4e, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0xc9, - 0xcc, 0x4d, 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0x40, 0xb0, 0xf4, 0xc0, 0x6a, 0x84, 0xf8, 0xd3, 0xf3, - 0xf3, 0xd3, 0x73, 0x52, 0xf5, 0x60, 0x3a, 0x94, 0xac, 0xb9, 0x38, 0x43, 0x60, 0x6a, 0x84, 0x24, - 0xb8, 0xd8, 0x8b, 0x53, 0x93, 0xf3, 0xf3, 0x52, 0x8a, 0x25, 0x18, 0x15, 0x18, 0x35, 0x98, 0x83, - 0x60, 0x5c, 0x21, 0x11, 0x2e, 0xd6, 0xbc, 0xc4, 0xbc, 0xfc, 0x62, 0x09, 0x26, 0x05, 0x46, 0x0d, - 0xd6, 0x20, 0x08, 0xc7, 0xa9, 0x91, 0x91, 0x4b, 0x38, 0x39, 0x3f, 0x57, 0x0f, 0xcd, 0x50, 0x27, - 0x3e, 0xb8, 0x91, 0x01, 0x20, 0xa1, 0x00, 0xc6, 0x28, 0x6d, 0x12, 0x1c, 0xbd, 0x80, 0x91, 0xf1, - 0x07, 0x23, 0xe3, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, 0x4c, 0x72, 0xee, 0x10, 0xc3, 0x03, - 0xa0, 0xca, 0xf5, 0xc2, 0x53, 0x73, 0x72, 0xbc, 0xf3, 0xf2, 0xcb, 0xf3, 0x42, 0x40, 0xda, 0x92, - 0xd8, 0xc0, 0xe6, 0x18, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, 0x17, 0x5f, 0xb7, 0xdc, 0x17, 0x01, - 0x00, 0x00, + // 191 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x1d, 0x97, 0x70, + 0x72, 0x7e, 0xae, 0x1e, 0x9a, 0x99, 0x4e, 0x7c, 0x70, 0x13, 0x03, 0x40, 0x42, 0x01, 0x8c, 0x51, + 0xda, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, 0xe9, 0xf9, 0x39, 0x89, + 0x79, 0xe9, 0x08, 0x27, 0x16, 0x94, 0x54, 0x16, 0xa4, 0x16, 0x23, 0x5c, 0xfa, 0x83, 0x91, 0x71, + 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, 0xc9, 0x01, 0x50, 0xb5, 0x7a, + 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, 0x3d, 0x49, 0x6c, 0x60, 
0x43, + 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x77, 0x4a, 0x07, 0xf7, 0x00, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto index 7992a858..b7cbd175 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto @@ -38,7 +38,6 @@ option go_package = "github.com/golang/protobuf/ptypes/timestamp"; option java_package = "com.google.protobuf"; option java_outer_classname = "TimestampProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // A Timestamp represents a point in time independent of any time zone @@ -53,6 +52,8 @@ option objc_class_prefix = "GPB"; // and from RFC 3339 date strings. // See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). // +// # Examples +// // Example 1: Compute Timestamp from POSIX `time()`. // // Timestamp timestamp; @@ -90,16 +91,37 @@ option objc_class_prefix = "GPB"; // // Example 5: Compute Timestamp from current time in Python. // -// now = time.time() -// seconds = int(now) -// nanos = int((now - seconds) * 10**9) -// timestamp = Timestamp(seconds=seconds, nanos=nanos) +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required, though only UTC (as indicated by "Z") is presently supported. +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://joda-time.sourceforge.net/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime()) +// to obtain a formatter capable of generating timestamps in this format. // // message Timestamp { // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from from 0001-01-01T00:00:00Z to + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to // 9999-12-31T23:59:59Z inclusive. 
   int64 seconds = 1;
diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
index 5e52a81c..0ed59bf1 100644
--- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
+++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.pb.go
@@ -1,12 +1,11 @@
-// Code generated by protoc-gen-go.
-// source: github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
-// DO NOT EDIT!
+// Code generated by protoc-gen-go. DO NOT EDIT.
+// source: google/protobuf/wrappers.proto
 
 /*
 Package wrappers is a generated protocol buffer package.
 
 It is generated from these files:
-	github.com/golang/protobuf/ptypes/wrappers/wrappers.proto
+	google/protobuf/wrappers.proto
 
 It has these top-level messages:
 	DoubleValue
@@ -50,6 +49,13 @@ func (*DoubleValue) ProtoMessage() {}
 func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
 func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" }
 
+func (m *DoubleValue) GetValue() float64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
 // Wrapper message for `float`.
 //
 // The JSON representation for `FloatValue` is JSON number.
@@ -64,6 +70,13 @@ func (*FloatValue) ProtoMessage() {}
 func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
 func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" }
 
+func (m *FloatValue) GetValue() float32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
 // Wrapper message for `int64`.
 //
 // The JSON representation for `Int64Value` is JSON string.
@@ -78,6 +91,13 @@ func (*Int64Value) ProtoMessage() {}
 func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
 func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" }
 
+func (m *Int64Value) GetValue() int64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
 // Wrapper message for `uint64`.
 //
 // The JSON representation for `UInt64Value` is JSON string.
@@ -92,6 +112,13 @@ func (*UInt64Value) ProtoMessage() {}
 func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
 func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" }
 
+func (m *UInt64Value) GetValue() uint64 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
 // Wrapper message for `int32`.
 //
 // The JSON representation for `Int32Value` is JSON number.
@@ -106,6 +133,13 @@ func (*Int32Value) ProtoMessage() {}
 func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
 func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" }
 
+func (m *Int32Value) GetValue() int32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
 // Wrapper message for `uint32`.
 //
 // The JSON representation for `UInt32Value` is JSON number.
@@ -120,6 +154,13 @@ func (*UInt32Value) ProtoMessage() {}
 func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} }
 func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" }
 
+func (m *UInt32Value) GetValue() uint32 {
+	if m != nil {
+		return m.Value
+	}
+	return 0
+}
+
 // Wrapper message for `bool`.
 //
 // The JSON representation for `BoolValue` is JSON `true` and `false`.
@@ -134,6 +175,13 @@ func (*BoolValue) ProtoMessage() {} func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + // Wrapper message for `string`. // // The JSON representation for `StringValue` is JSON string. @@ -148,6 +196,13 @@ func (*StringValue) ProtoMessage() {} func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (*StringValue) XXX_WellKnownType() string { return "StringValue" } +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + // Wrapper message for `bytes`. // // The JSON representation for `BytesValue` is JSON string. @@ -162,6 +217,13 @@ func (*BytesValue) ProtoMessage() {} func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + func init() { proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") @@ -174,27 +236,25 @@ func init() { proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") } -func init() { - proto.RegisterFile("github.com/golang/protobuf/ptypes/wrappers/wrappers.proto", fileDescriptor0) -} +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 260 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xb2, 0x4c, 0xcf, 0x2c, 0xc9, - 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x4f, 0xcf, 0xcf, 0x49, 0xcc, 0x4b, 0xd7, 0x2f, 0x28, - 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x28, 0xa9, 0x2c, 0x48, 0x2d, 0xd6, 0x2f, 0x2f, - 0x4a, 0x2c, 0x28, 0x48, 0x2d, 0x42, 0x30, 0xf4, 0xc0, 0x2a, 0x84, 0xf8, 0xd3, 0xf3, 0xf3, 0xd3, - 0x73, 0x52, 0xf5, 0x60, 0xea, 0x95, 0x94, 0xb9, 0xb8, 0x5d, 0xf2, 0x4b, 0x93, 0x72, 0x52, 0xc3, - 0x12, 0x73, 0x4a, 0x53, 0x85, 0x44, 0xb8, 0x58, 0xcb, 0x40, 0x0c, 0x09, 0x46, 0x05, 0x46, 0x0d, - 0xc6, 0x20, 0x08, 0x47, 0x49, 0x89, 0x8b, 0xcb, 0x2d, 0x27, 0x3f, 0xb1, 0x04, 0x8b, 0x1a, 0x26, - 0x24, 0x35, 0x9e, 0x79, 0x25, 0x66, 0x26, 0x58, 0xd4, 0x30, 0xc3, 0xd4, 0x28, 0x73, 0x71, 0x87, - 0xe2, 0x52, 0xc4, 0x82, 0x6a, 0x90, 0xb1, 0x11, 0x16, 0x35, 0xac, 0x68, 0x06, 0x61, 0x55, 0xc4, - 0x0b, 0x53, 0xa4, 0xc8, 0xc5, 0xe9, 0x94, 0x9f, 0x9f, 0x83, 0x45, 0x09, 0x07, 0x92, 0x39, 0xc1, - 0x25, 0x45, 0x99, 0x79, 0xe9, 0x58, 0x14, 0x71, 0x22, 0x39, 0xc8, 0xa9, 0xb2, 0x24, 0xb5, 0x18, - 0x8b, 0x1a, 0x1e, 0xa8, 0x1a, 0xa7, 0x7a, 0x2e, 0xe1, 0xe4, 0xfc, 0x5c, 0x3d, 0xb4, 0xd0, 0x75, - 0xe2, 0x0d, 0x87, 0x06, 0x7f, 0x00, 0x48, 0x24, 0x80, 0x31, 0x4a, 0x8b, 0xf8, 0xa8, 0x5b, 0xc0, - 0xc8, 0xf8, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, - 0xd1, 0x01, 0x50, 0xd5, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, 0xe5, 0x79, 0x21, 0x20, - 0x5d, 0x49, 0x6c, 0x60, 0x63, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xa9, 0xdf, 0x64, 0x4b, - 0x1c, 0x02, 0x00, 0x00, + // 259 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 
0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x0d, + 0x97, 0x70, 0x72, 0x7e, 0xae, 0x1e, 0x5a, 0xe8, 0x3a, 0xf1, 0x86, 0x43, 0x83, 0x3f, 0x00, 0x24, + 0x12, 0xc0, 0x18, 0xa5, 0x95, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, + 0x9e, 0x9f, 0x93, 0x98, 0x97, 0x8e, 0x88, 0xaa, 0x82, 0x92, 0xca, 0x82, 0xd4, 0x62, 0x78, 0x8c, + 0xfd, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0x62, 0x6e, + 0x00, 0x54, 0xa9, 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x4b, + 0x12, 0x1b, 0xd8, 0x0c, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x19, 0x6c, 0xb9, 0xb8, 0xfe, + 0x01, 0x00, 0x00, } diff --git a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto index 4828ad9a..01947639 100644 --- a/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto +++ b/src/stackdriver-nozzle/vendor/github.com/golang/protobuf/ptypes/wrappers/wrappers.proto @@ -43,7 +43,6 @@ option go_package = "github.com/golang/protobuf/ptypes/wrappers"; option java_package = "com.google.protobuf"; option java_outer_classname = "WrappersProto"; option java_multiple_files = true; -option java_generate_equals_and_hash = true; option objc_class_prefix = "GPB"; // Wrapper message for `double`. diff --git a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/README.md b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/README.md index 38ebdcf6..3cedd5be 100644 --- a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/README.md +++ b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/README.md @@ -8,4 +8,17 @@ Google API Extensions for Go (gax-go) is a set of modules which aids the development of APIs for clients and servers based on `gRPC` and Google API conventions. -This project is currently experimental and not supported. +Application code will rarely need to use this library directly, +but the code generated automatically from API definition files can use it +to simplify code generation and to provide more convenient and idiomatic API surface. + +**This project is currently experimental and not supported.** + +Go Versions +=========== +This library requires Go 1.6 or above. + +License +======= +BSD - please see [LICENSE](https://github.com/googleapis/gax-go/blob/master/LICENSE) +for more information. 
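Editor's note: the gax-go README now stresses that application code rarely calls the library directly. For orientation while reading the call_option.go and invoke.go changes that follow, here is a rough sketch of how a generated client typically drives the retry surface. Only `gax.Invoke`, `WithRetry`, `OnCodes`, `Backoff` and `CallSettings` are real names from the package; the wrapper function and the injected `doRPC` callback are illustrative stand-ins for a stub call:

```go
package gaxexample

import (
	"time"

	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/grpc/codes"
)

// callWithRetry mirrors what generated clients do: the APICall closure now
// receives the resolved CallSettings so per-call gRPC options can be
// forwarded to the stub. doRPC stands in for a real stub invocation.
func callWithRetry(ctx context.Context, doRPC func(context.Context) error) error {
	return gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		_ = settings.GRPC // a generated client would pass these options to the stub
		return doRPC(ctx)
	}, gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        30 * time.Second,
			Multiplier: 2,
		})
	}))
}
```

Each retry pause is drawn uniformly between zero and the current backoff ceiling, per the full-jitter strategy the comment in call_option.go cites.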
diff --git a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/call_option.go b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/call_option.go index 4ba1cdfe..7b621643 100644 --- a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/call_option.go +++ b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/call_option.go @@ -35,6 +35,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) // CallOption is an option used by Invoke to control behaviors of RPC calls. @@ -80,7 +81,11 @@ type boRetryer struct { } func (r *boRetryer) Retry(err error) (time.Duration, bool) { - c := grpc.Code(err) + st, ok := status.FromError(err) + if !ok { + return 0, false + } + c := st.Code() for _, rc := range r.codes { if c == rc { return r.backoff.Pause(), true @@ -121,6 +126,9 @@ func (bo *Backoff) Pause() time.Duration { if bo.Multiplier < 1 { bo.Multiplier = 2 } + // Select a duration between zero and the current max. It might seem counterintuitive to + // have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html + // argues that that is the best strategy. d := time.Duration(rand.Int63n(int64(bo.cur))) bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier) if bo.cur > bo.Max { @@ -129,8 +137,21 @@ func (bo *Backoff) Pause() time.Duration { return d } +type grpcOpt []grpc.CallOption + +func (o grpcOpt) Resolve(s *CallSettings) { + s.GRPC = o +} + +func WithGRPCOptions(opt ...grpc.CallOption) CallOption { + return grpcOpt(append([]grpc.CallOption(nil), opt...)) +} + type CallSettings struct { // Retry returns a Retryer to be used to control retry logic of a method call. // If Retry is nil or the returned Retryer is nil, the call will not be retried. Retry func() Retryer + + // CallOptions to be forwarded to GRPC. + GRPC []grpc.CallOption } diff --git a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/gax.go b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/gax.go index c7e4ce91..5ebedff0 100644 --- a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/gax.go +++ b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/gax.go @@ -27,6 +27,14 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// Package gax contains a set of modules which aid the development of APIs +// for clients and servers based on gRPC and Google API conventions. +// +// Application code will rarely need to use this library directly. +// However, code generated automatically from API definition files can use it +// to simplify code generation and to provide more convenient and idiomatic API surfaces. +// +// This project is currently experimental and not supported. package gax const Version = "0.1.0" diff --git a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/header.go b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/header.go new file mode 100644 index 00000000..d81455ec --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/header.go @@ -0,0 +1,24 @@ +package gax + +import "bytes" + +// XGoogHeader is for use by the Google Cloud Libraries only. +// +// XGoogHeader formats key-value pairs. +// The resulting string is suitable for x-goog-api-client header. 
+func XGoogHeader(keyval ...string) string { + if len(keyval) == 0 { + return "" + } + if len(keyval)%2 != 0 { + panic("gax.Header: odd argument count") + } + var buf bytes.Buffer + for i := 0; i < len(keyval); i += 2 { + buf.WriteByte(' ') + buf.WriteString(keyval[i]) + buf.WriteByte('/') + buf.WriteString(keyval[i+1]) + } + return buf.String()[1:] +} diff --git a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/invoke.go b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/invoke.go index 644c677e..86049d82 100644 --- a/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/invoke.go +++ b/src/stackdriver-nozzle/vendor/github.com/googleapis/gax-go/invoke.go @@ -36,7 +36,7 @@ import ( ) // A user defined call stub. -type APICall func(context.Context) error +type APICall func(context.Context, CallSettings) error // Invoke calls the given APICall, // performing retries as specified by opts, if any. @@ -45,19 +45,29 @@ func Invoke(ctx context.Context, call APICall, opts ...CallOption) error { for _, opt := range opts { opt.Resolve(&settings) } - return invoke(ctx, call, settings, timeSleeper{}) + return invoke(ctx, call, settings, Sleep) } -type sleeper interface { - // Sleep sleeps for duration d or until ctx.Done() closes, whichever happens first. - // If ctx.Done() closes, Sleep returns ctx.Err(), otherwise it returns nil. - Sleep(ctx context.Context, d time.Duration) error +// Sleep is similar to time.Sleep, but it can be interrupted by ctx.Done() closing. +// If interrupted, Sleep returns ctx.Err(). +func Sleep(ctx context.Context, d time.Duration) error { + t := time.NewTimer(d) + select { + case <-ctx.Done(): + t.Stop() + return ctx.Err() + case <-t.C: + return nil + } } +type sleeper func(ctx context.Context, d time.Duration) error + +// invoke implements Invoke, taking an additional sleeper argument for testing. func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper) error { var retryer Retryer for { - err := call(ctx) + err := call(ctx, settings) if err == nil { return nil } @@ -73,19 +83,8 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper } if d, ok := retryer.Retry(err); !ok { return err - } else if err = sp.Sleep(ctx, d); err != nil { + } else if err = sp(ctx, d); err != nil { return err } } } - -type timeSleeper struct{} - -func (s timeSleeper) Sleep(ctx context.Context, d time.Duration) error { - select { - case <-ctx.Done(): - return ctx.Err() - case <-time.After(d): - return nil - } -} diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/README.md b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/README.md index 9d71959e..33c3d2be 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/README.md +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/README.md @@ -3,6 +3,9 @@ Gorilla WebSocket is a [Go](http://golang.org/) implementation of the [WebSocket](http://www.rfc-editor.org/rfc/rfc6455.txt) protocol. 
+[![Build Status](https://travis-ci.org/gorilla/websocket.svg?branch=master)](https://travis-ci.org/gorilla/websocket) +[![GoDoc](https://godoc.org/github.com/gorilla/websocket?status.svg)](https://godoc.org/github.com/gorilla/websocket) + ### Documentation * [API Reference](http://godoc.org/github.com/gorilla/websocket) @@ -43,7 +46,7 @@ subdirectory](https://github.com/gorilla/websocket/tree/master/examples/autobahn Send pings and receive pongsYesNo Get the type of a received data messageYesYes, see note 2 Other Features -Limit size of received messageYesNo +Compression ExtensionsExperimentalNo Read message using io.ReaderYesNo, see note 3 Write message using io.WriteCloserYesNo, see note 3 diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client.go index 879d33ed..43a87c75 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client.go +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client.go @@ -23,6 +23,8 @@ import ( // invalid. var ErrBadHandshake = errors.New("websocket: bad handshake") +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + // NewClient creates a new client connection using the given net connection. // The URL u specifies the host and request URI. Use requestHeader to specify // the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies @@ -64,12 +66,24 @@ type Dialer struct { // HandshakeTimeout specifies the duration for the handshake to complete. HandshakeTimeout time.Duration - // Input and output buffer sizes. If the buffer size is zero, then a - // default value of 4096 is used. + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. ReadBufferSize, WriteBufferSize int // Subprotocols specifies the client's requested subprotocols. Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar } var errMalformedURL = errors.New("malformed ws or wss URL") @@ -83,7 +97,6 @@ func parseURL(s string) (*url.URL, error) { // // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] - var u url.URL switch { case strings.HasPrefix(s, "ws://"): @@ -193,6 +206,13 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re Host: u.Host, } + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + // Set the request headers using the capitalization for names and values in // RFC examples. Although the capitalization shouldn't matter, there are // servers that depend on it. 
The Header.Set method is not used because the @@ -214,6 +234,7 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re k == "Connection" || k == "Sec-Websocket-Key" || k == "Sec-Websocket-Version" || + k == "Sec-Websocket-Extensions" || (k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0): return nil, nil, errors.New("websocket: duplicate header not allowed: " + k) default: @@ -221,6 +242,10 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re } } + if d.EnableCompression { + req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover") + } + hostPort, hostNoPort := hostPortNoPort(u) var proxyURL *url.URL @@ -324,6 +349,13 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re if err != nil { return nil, nil, err } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + if resp.StatusCode != 101 || !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || @@ -337,6 +369,20 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re return nil, resp, ErrBadHandshake } + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") @@ -344,32 +390,3 @@ func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Re netConn = nil // to avoid close in defer. return conn, resp, nil } - -// cloneTLSConfig clones all public fields except the fields -// SessionTicketsDisabled and SessionTicketKey. This avoids copying the -// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a -// config in active use. -func cloneTLSConfig(cfg *tls.Config) *tls.Config { - if cfg == nil { - return &tls.Config{} - } - return &tls.Config{ - Rand: cfg.Rand, - Time: cfg.Time, - Certificates: cfg.Certificates, - NameToCertificate: cfg.NameToCertificate, - GetCertificate: cfg.GetCertificate, - RootCAs: cfg.RootCAs, - NextProtos: cfg.NextProtos, - ServerName: cfg.ServerName, - ClientAuth: cfg.ClientAuth, - ClientCAs: cfg.ClientCAs, - InsecureSkipVerify: cfg.InsecureSkipVerify, - CipherSuites: cfg.CipherSuites, - PreferServerCipherSuites: cfg.PreferServerCipherSuites, - ClientSessionCache: cfg.ClientSessionCache, - MinVersion: cfg.MinVersion, - MaxVersion: cfg.MaxVersion, - CurvePreferences: cfg.CurvePreferences, - } -} diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client_clone.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 00000000..4f0d9437 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 00000000..babb007f --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/compression.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/compression.go index e2ac7617..813ffb1e 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/compression.go +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/compression.go @@ -1,4 +1,4 @@ -// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. @@ -9,22 +9,48 @@ import ( "errors" "io" "strings" + "sync" ) -func decompressNoContextTakeover(r io.Reader) io.Reader { +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { const tail = // Add four bytes as specified in RFC "\x00\x00\xff\xff" + // Add final block to squelch unexpected EOF error from flate reader. 
"\x01\x00\x00\xff\xff" - return flate.NewReader(io.MultiReader(r, strings.NewReader(tail))) + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} } -func compressNoContextTakeover(w io.WriteCloser) (io.WriteCloser, error) { +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] tw := &truncWriter{w: w} - fw, err := flate.NewWriter(tw, 3) - return &flateWrapper{fw: fw, tw: tw}, err + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} } // truncWriter is an io.Writer that writes all but the last four bytes of the @@ -63,17 +89,26 @@ func (w *truncWriter) Write(p []byte) (int, error) { return n + nn, err } -type flateWrapper struct { +type flateWriteWrapper struct { fw *flate.Writer tw *truncWriter + p *sync.Pool } -func (w *flateWrapper) Write(p []byte) (int, error) { +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } return w.fw.Write(p) } -func (w *flateWrapper) Close() error { +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { return errors.New("websocket: internal error, unexpected bytes at end of flate stream") } @@ -83,3 +118,31 @@ func (w *flateWrapper) Close() error { } return err2 } + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. 
+ r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/conn.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/conn.go index eb4334e7..97e1dbac 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/conn.go +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/conn.go @@ -13,6 +13,7 @@ import ( "math/rand" "net" "strconv" + "sync" "time" "unicode/utf8" ) @@ -180,6 +181,11 @@ var ( errInvalidControlFrame = errors.New("websocket: invalid control frame") ) +func newMaskKey() [4]byte { + n := rand.Uint32() + return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} +} + func hideTempErr(err error) error { if e, ok := err.(net.Error); ok && e.Temporary() { err = &netError{msg: e.Error(), timeout: e.Timeout()} @@ -218,42 +224,28 @@ func isValidReceivedCloseCode(code int) bool { return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999) } -func maskBytes(key [4]byte, pos int, b []byte) int { - for i := range b { - b[i] ^= key[pos&3] - pos++ - } - return pos & 3 -} - -func newMaskKey() [4]byte { - n := rand.Uint32() - return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)} -} - -// Conn represents a WebSocket connection. +// The Conn type represents a WebSocket connection. type Conn struct { conn net.Conn isServer bool subprotocol string // Write fields - mu chan bool // used as mutex to protect write to conn and closeSent - closeSent bool // whether close message was sent - writeErr error - writeBuf []byte // frame is constructed in this buffer. - writePos int // end of data in writeBuf. - writeFrameType int // type of the current frame. - writeDeadline time.Time - messageWriter *messageWriter // the current low-level message writer - writer io.WriteCloser // the current writer returned to the application - isWriting bool // for best-effort concurrent write detection + mu chan bool // used as mutex to protect write to conn + writeBuf []byte // frame is constructed in this buffer. + writeDeadline time.Time + writer io.WriteCloser // the current writer returned to the application + isWriting bool // for best-effort concurrent write detection + + writeErrMu sync.Mutex + writeErr error enableWriteCompression bool - writeCompress bool // whether next call to flushFrame should set RSV1 - newCompressionWriter func(io.WriteCloser) (io.WriteCloser, error) + compressionLevel int + newCompressionWriter func(io.WriteCloser, int) io.WriteCloser // Read fields + reader io.ReadCloser // the current reader returned to the application readErr error br *bufio.Reader readRemaining int64 // bytes remaining in current frame. 
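Editor's note: taken together, the client.go, compression.go and conn.go changes in this gorilla/websocket bump wire up experimental permessage-deflate plus a client-side cookie jar. A minimal client sketch of the new `Dialer` knobs; the endpoint URL is made up and not part of the change:

```go
package main

import (
	"log"
	"net/http"
	"net/http/cookiejar"

	"github.com/gorilla/websocket"
)

func main() {
	jar, err := cookiejar.New(nil)
	if err != nil {
		log.Fatal(err)
	}
	dialer := websocket.Dialer{
		EnableCompression: true, // request permessage-deflate, no context takeover
		Jar:               jar,  // cookies set during the handshake are retained
	}
	conn, _, err := dialer.Dial("ws://localhost:8080/firehose", http.Header{})
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// If the server agreed to compression, messages arrive here already
	// decompressed; the Read methods always return plain bytes.
	_, msg, err := conn.ReadMessage()
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("received %d bytes", len(msg))
}
```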
@@ -264,38 +256,83 @@ type Conn struct { readMaskKey [4]byte handlePong func(string) error handlePing func(string) error + handleClose func(int, string) error readErrCount int messageReader *messageReader // the current low-level reader readDecompress bool // whether last read frame had RSV1 set - newDecompressionReader func(io.Reader) io.Reader + newDecompressionReader func(io.Reader) io.ReadCloser } func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { + return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil) +} + +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn { mu := make(chan bool, 1) mu <- true - if readBufferSize == 0 { - readBufferSize = defaultReadBufferSize + var br *bufio.Reader + if readBufferSize == 0 && brw != nil && brw.Reader != nil { + // Reuse the supplied bufio.Reader if the buffer has a useful size. + // This code assumes that peek on a reader returns + // bufio.Reader.buf[:0]. + brw.Reader.Reset(conn) + if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 { + br = brw.Reader + } } - if readBufferSize < maxControlFramePayloadSize { - readBufferSize = maxControlFramePayloadSize + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } + if readBufferSize < maxControlFramePayloadSize { + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + var writeBuf []byte + if writeBufferSize == 0 && brw != nil && brw.Writer != nil { + // Use the bufio.Writer's buffer if the buffer has a useful size. This + // code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + brw.Writer.Reset(&wh) + brw.Writer.WriteByte(0) + brw.Flush() + if cap(wh.p) >= maxFrameHeaderSize+256 { + writeBuf = wh.p[:cap(wh.p)] + } } - if writeBufferSize == 0 { - writeBufferSize = defaultWriteBufferSize + + if writeBuf == nil { + if writeBufferSize == 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize) } c := &Conn{ isServer: isServer, - br: bufio.NewReaderSize(conn, readBufferSize), + br: br, conn: conn, mu: mu, readFinal: true, - writeBuf: make([]byte, writeBufferSize+maxFrameHeaderSize), - writeFrameType: noFrame, - writePos: maxFrameHeaderSize, + writeBuf: writeBuf, enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, } + c.SetCloseHandler(nil) c.SetPingHandler(nil) c.SetPongHandler(nil) return c @@ -323,29 +360,40 @@ func (c *Conn) RemoteAddr() net.Addr { // Write methods +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { <-c.mu defer func() { c.mu <- true }() - if c.closeSent { - return ErrCloseSent - } else if frameType == CloseMessage { - c.closeSent = true + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err } c.conn.SetWriteDeadline(deadline) for _, buf := range bufs { if len(buf) > 0 { - n, err := c.conn.Write(buf) - if n != len(buf) { - // Close on partial write. 
- c.conn.Close() - } + _, err := c.conn.Write(buf) if err != nil { - return err + return c.writeFatal(err) } } } + + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } return nil } @@ -394,84 +442,103 @@ func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) er } defer func() { c.mu <- true }() - if c.closeSent { - return ErrCloseSent - } else if messageType == CloseMessage { - c.closeSent = true + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err } c.conn.SetWriteDeadline(deadline) - n, err := c.conn.Write(buf) - if n != 0 && n != len(buf) { - c.conn.Close() + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) } - return hideTempErr(err) -} - -// NextWriter returns a writer for the next message to send. The writer's Close -// method flushes the complete message to the network. -// -// There can be at most one open writer on a connection. NextWriter closes the -// previous writer if the application has not already done so. -func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { - if c.writeErr != nil { - return nil, c.writeErr + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) } + return err +} +func (c *Conn) prepWrite(messageType int) error { // Close previous writer if not already closed by the application. It's // probably better to return an error in this situation, but we cannot // change this without breaking existing applications. if c.writer != nil { - err := c.writer.Close() - if err != nil { - return nil, err - } + c.writer.Close() + c.writer = nil } if !isControl(messageType) && !isData(messageType) { - return nil, errBadWriteOpCode + return errBadWriteOpCode } - c.writeFrameType = messageType - c.messageWriter = &messageWriter{c} + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + return err +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if err := c.prepWrite(messageType); err != nil { + return nil, err + } - var w io.WriteCloser = c.messageWriter + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, + } + c.writer = mw if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { - c.writeCompress = true - var err error - w, err = c.newCompressionWriter(w) - if err != nil { - c.writer.Close() - return nil, err - } + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w } + return c.writer, nil +} - return w, nil +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. + err error +} + +func (w *messageWriter) fatal(err error) error { + if w.err != nil { + w.err = err + w.c.writer = nil + } + return err } // flushFrame writes buffered data and extra as a frame to the network. The // final argument indicates that this is the last frame in the message. 
-func (c *Conn) flushFrame(final bool, extra []byte) error { - length := c.writePos - maxFrameHeaderSize + len(extra) +func (w *messageWriter) flushFrame(final bool, extra []byte) error { + c := w.c + length := w.pos - maxFrameHeaderSize + len(extra) // Check for invalid control frames. - if isControl(c.writeFrameType) && + if isControl(w.frameType) && (!final || length > maxControlFramePayloadSize) { - c.messageWriter = nil - c.writer = nil - c.writeFrameType = noFrame - c.writePos = maxFrameHeaderSize - return errInvalidControlFrame + return w.fatal(errInvalidControlFrame) } - b0 := byte(c.writeFrameType) + b0 := byte(w.frameType) if final { b0 |= finalBit } - if c.writeCompress { + if w.compress { b0 |= rsv1Bit } - c.writeCompress = false + w.compress = false b1 := byte(0) if !c.isServer { @@ -504,10 +571,9 @@ func (c *Conn) flushFrame(final bool, extra []byte) error { if !c.isServer { key := newMaskKey() copy(c.writeBuf[maxFrameHeaderSize-4:], key[:]) - maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:c.writePos]) + maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos]) if len(extra) > 0 { - c.writeErr = errors.New("websocket: internal error, extra used in client mode") - return c.writeErr + return c.writeFatal(errors.New("websocket: internal error, extra used in client mode")) } } @@ -520,44 +586,35 @@ func (c *Conn) flushFrame(final bool, extra []byte) error { } c.isWriting = true - c.writeErr = c.write(c.writeFrameType, c.writeDeadline, c.writeBuf[framePos:c.writePos], extra) + err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra) if !c.isWriting { panic("concurrent write to websocket connection") } c.isWriting = false - // Setup for next frame. - c.writePos = maxFrameHeaderSize - c.writeFrameType = continuationFrame + if err != nil { + return w.fatal(err) + } + if final { - c.messageWriter = nil c.writer = nil - c.writeFrameType = noFrame + return nil } - return c.writeErr -} -type messageWriter struct{ c *Conn } - -func (w *messageWriter) err() error { - c := w.c - if c.messageWriter != w { - return errWriteClosed - } - if c.writeErr != nil { - return c.writeErr - } + // Setup for next frame. + w.pos = maxFrameHeaderSize + w.frameType = continuationFrame return nil } func (w *messageWriter) ncopy(max int) (int, error) { - n := len(w.c.writeBuf) - w.c.writePos + n := len(w.c.writeBuf) - w.pos if n <= 0 { - if err := w.c.flushFrame(false, nil); err != nil { + if err := w.flushFrame(false, nil); err != nil { return 0, err } - n = len(w.c.writeBuf) - w.c.writePos + n = len(w.c.writeBuf) - w.pos } if n > max { n = max @@ -566,13 +623,13 @@ func (w *messageWriter) ncopy(max int) (int, error) { } func (w *messageWriter) Write(p []byte) (int, error) { - if err := w.err(); err != nil { - return 0, err + if w.err != nil { + return 0, w.err } if len(p) > 2*len(w.c.writeBuf) && w.c.isServer { // Don't buffer large messages. 
- err := w.c.flushFrame(false, p) + err := w.flushFrame(false, p) if err != nil { return 0, err } @@ -585,16 +642,16 @@ func (w *messageWriter) Write(p []byte) (int, error) { if err != nil { return 0, err } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n p = p[n:] } return nn, nil } func (w *messageWriter) WriteString(p string) (int, error) { - if err := w.err(); err != nil { - return 0, err + if w.err != nil { + return 0, w.err } nn := len(p) @@ -603,27 +660,27 @@ func (w *messageWriter) WriteString(p string) (int, error) { if err != nil { return 0, err } - copy(w.c.writeBuf[w.c.writePos:], p[:n]) - w.c.writePos += n + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n p = p[n:] } return nn, nil } func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { - if err := w.err(); err != nil { - return 0, err + if w.err != nil { + return 0, w.err } for { - if w.c.writePos == len(w.c.writeBuf) { - err = w.c.flushFrame(false, nil) + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) if err != nil { break } } var n int - n, err = r.Read(w.c.writeBuf[w.c.writePos:]) - w.c.writePos += n + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n nn += int64(n) if err != nil { if err == io.EOF { @@ -636,27 +693,59 @@ func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { } func (w *messageWriter) Close() error { - if err := w.err(); err != nil { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { return err } - return w.c.flushFrame(true, nil) + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err } // WriteMessage is a helper method for getting a writer using NextWriter, // writing the message and closing the writer. func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + w, err := c.NextWriter(messageType) if err != nil { return err } - if _, ok := w.(*messageWriter); ok && c.isServer { - // Optimize write as a single frame. 
- n := copy(c.writeBuf[c.writePos:], data) - c.writePos += n - data = data[n:] - err = c.flushFrame(true, data) - return err - } if _, err = w.Write(data); err != nil { return err } @@ -799,11 +888,9 @@ func (c *Conn) advanceFrame() (int, error) { return noFrame, err } case CloseMessage: - echoMessage := []byte{} closeCode := CloseNoStatusReceived closeText := "" if len(payload) >= 2 { - echoMessage = payload[:2] closeCode = int(binary.BigEndian.Uint16(payload)) if !isValidReceivedCloseCode(closeCode) { return noFrame, c.handleProtocolError("invalid close code") @@ -813,7 +900,9 @@ func (c *Conn) advanceFrame() (int, error) { return noFrame, c.handleProtocolError("invalid utf8 payload in close frame") } } - c.WriteControl(CloseMessage, echoMessage, time.Now().Add(writeWait)) + if err := c.handleClose(closeCode, closeText); err != nil { + return noFrame, err + } return noFrame, &CloseError{Code: closeCode, Text: closeText} } @@ -836,6 +925,11 @@ func (c *Conn) handleProtocolError(message string) error { // permanent. Once this method returns a non-nil error, all subsequent calls to // this method return the same error. func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { + // Close previous reader, only relevant for decompression. + if c.reader != nil { + c.reader.Close() + c.reader = nil + } c.messageReader = nil c.readLength = 0 @@ -848,11 +942,11 @@ func (c *Conn) NextReader() (messageType int, r io.Reader, err error) { } if frameType == TextMessage || frameType == BinaryMessage { c.messageReader = &messageReader{c} - var r io.Reader = c.messageReader + c.reader = c.messageReader if c.readDecompress { - r = c.newDecompressionReader(r) + c.reader = c.newDecompressionReader(c.reader) } - return frameType, r, nil + return frameType, c.reader, nil } } @@ -914,6 +1008,10 @@ func (r *messageReader) Read(b []byte) (int, error) { return 0, err } +func (r *messageReader) Close() error { + return nil +} + // ReadMessage is a helper method for getting a reader using NextReader and // reading from that reader to a buffer. func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { @@ -941,6 +1039,38 @@ func (c *Conn) SetReadLimit(limit int64) { c.readLimit = limit } +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close frame +// back to the peer. +// +// The application must read the connection to process close messages as +// described in the section on Control Frames above. +// +// The connection read methods return a CloseError when a close frame is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close frame back to +// the peer. 
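Editor's note: the close-handler hook documented above (and defined just below) is only needed when an application must act before the close frame is echoed. A hedged sketch; the logging is illustrative and the fallback mirrors what the default handler does:

```go
package wsexample

import (
	"log"
	"time"

	"github.com/gorilla/websocket"
)

// installCloseLogger logs the peer's close code before performing the same
// echo the default handler would; purely illustrative.
func installCloseLogger(conn *websocket.Conn) {
	conn.SetCloseHandler(func(code int, text string) error {
		log.Printf("peer closed connection: code=%d text=%q", code, text)
		msg := []byte{}
		if code != websocket.CloseNoStatusReceived {
			msg = websocket.FormatCloseMessage(code, "")
		}
		return conn.WriteControl(websocket.CloseMessage, msg, time.Now().Add(time.Second))
	})
}
```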
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) { + if h == nil { + h = func(code int, text string) error { + message := []byte{} + if code != CloseNoStatusReceived { + message = FormatCloseMessage(code, "") + } + c.WriteControl(CloseMessage, message, time.Now().Add(writeWait)) + return nil + } + } + c.handleClose = h +} + // PingHandler returns the current ping handler func (c *Conn) PingHandler() func(appData string) error { return c.handlePing @@ -949,6 +1079,9 @@ func (c *Conn) PingHandler() func(appData string) error { // SetPingHandler sets the handler for ping messages received from the peer. // The appData argument to h is the PING frame application data. The default // ping handler sends a pong to the peer. +// +// The application must read the connection to process ping messages as +// described in the section on Control Frames above. func (c *Conn) SetPingHandler(h func(appData string) error) { if h == nil { h = func(message string) error { @@ -972,6 +1105,9 @@ func (c *Conn) PongHandler() func(appData string) error { // SetPongHandler sets the handler for pong messages received from the peer. // The appData argument to h is the PONG frame application data. The default // pong handler does nothing. +// +// The application must read the connection to process ping messages as +// described in the section on Control Frames above. func (c *Conn) SetPongHandler(h func(appData string) error) { if h == nil { h = func(string) error { return nil } @@ -985,6 +1121,25 @@ func (c *Conn) UnderlyingConn() net.Conn { return c.conn } +// EnableWriteCompression enables and disables write compression of +// subsequent text and binary messages. This function is a noop if +// compression was not negotiated with the peer. +func (c *Conn) EnableWriteCompression(enable bool) { + c.enableWriteCompression = enable +} + +// SetCompressionLevel sets the flate compression level for subsequent text and +// binary messages. This function is a noop if compression was not negotiated +// with the peer. See the compress/flate package for a description of +// compression levels. +func (c *Conn) SetCompressionLevel(level int) error { + if !isValidCompressionLevel(level) { + return errors.New("websocket: invalid compression level") + } + c.compressionLevel = level + return nil +} + // FormatCloseMessage formats closeCode and text as a WebSocket close message. func FormatCloseMessage(closeCode int, text string) []byte { buf := make([]byte, 2+len(text)) diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/doc.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/doc.go index c901a7a9..f5ff0823 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/doc.go +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/doc.go @@ -6,9 +6,8 @@ // // Overview // -// The Conn type represents a WebSocket connection. A server application uses -// the Upgrade function from an Upgrader object with a HTTP request handler -// to get a pointer to a Conn: +// The Conn type represents a WebSocket connection. 
A server application calls +// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn: // // var upgrader = websocket.Upgrader{ // ReadBufferSize: 1024, @@ -33,7 +32,7 @@ // if err != nil { // return // } -// if err = conn.WriteMessage(messageType, p); err != nil { +// if err := conn.WriteMessage(messageType, p); err != nil { // return err // } // } @@ -118,9 +117,10 @@ // // Applications are responsible for ensuring that no more than one goroutine // calls the write methods (NextWriter, SetWriteDeadline, WriteMessage, -// WriteJSON) concurrently and that no more than one goroutine calls the read -// methods (NextReader, SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, -// SetPingHandler) concurrently. +// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and +// that no more than one goroutine calls the read methods (NextReader, +// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler) +// concurrently. // // The Close and WriteControl methods can be called concurrently with all other // methods. @@ -146,7 +146,34 @@ // CheckOrigin: func(r *http.Request) bool { return true }, // } // -// The deprecated Upgrade function does not enforce an origin policy. It's the -// application's responsibility to check the Origin header before calling -// Upgrade. +// The deprecated package-level Upgrade function does not perform origin +// checking. The application is responsible for checking the Origin header +// before calling the Upgrade function. +// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. package websocket diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/json.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/json.go index 4f0e3687..dc2c1f64 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/json.go +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/json.go @@ -9,12 +9,14 @@ import ( "io" ) -// WriteJSON is deprecated, use c.WriteJSON instead. +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. func WriteJSON(c *Conn, v interface{}) error { return c.WriteJSON(v) } -// WriteJSON writes the JSON encoding of v to the connection. +// WriteJSON writes the JSON encoding of v as a message. // // See the documentation for encoding/json Marshal for details about the // conversion of Go values to JSON. 
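Editor's note: the json.go helpers keep their behaviour; the package-level forms are simply marked Deprecated in the standard Go convention. A small sketch of the method forms they delegate to; the `Envelope` type is invented for the example:

```go
package wsexample

import (
	"log"

	"github.com/gorilla/websocket"
)

// Envelope is an invented payload type for this sketch.
type Envelope struct {
	Origin  string `json:"origin"`
	Message string `json:"message"`
}

// echoJSON reads one JSON message and writes it back using the method forms
// that the deprecated package-level ReadJSON/WriteJSON delegate to.
func echoJSON(conn *websocket.Conn) error {
	var in Envelope
	if err := conn.ReadJSON(&in); err != nil {
		return err
	}
	log.Printf("echoing message from %s", in.Origin)
	return conn.WriteJSON(in)
}
```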
@@ -31,7 +33,10 @@ func (c *Conn) WriteJSON(v interface{}) error { return err2 } -// ReadJSON is deprecated, use c.ReadJSON instead. +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. func ReadJSON(c *Conn, v interface{}) error { return c.ReadJSON(v) } diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/mask.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 00000000..6a88bbc7 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build !appengine + +package websocket + +import "unsafe" + +const wordSize = int(unsafe.Sizeof(uintptr(0))) + +func maskBytes(key [4]byte, pos int, b []byte) int { + + // Mask one byte at a time for small buffers. + if len(b) < 2*wordSize { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 + } + + // Mask one byte at a time to word boundary. + if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 { + n = wordSize - n + for i := range b[:n] { + b[i] ^= key[pos&3] + pos++ + } + b = b[n:] + } + + // Create aligned word size key. + var k [wordSize]byte + for i := range k { + k[i] = key[(pos+i)&3] + } + kw := *(*uintptr)(unsafe.Pointer(&k)) + + // Mask one word at a time. + n := (len(b) / wordSize) * wordSize + for i := 0; i < n; i += wordSize { + *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw + } + + // Mask one byte at a time for remaining bytes. + b = b[n:] + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + + return pos & 3 +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/mask_safe.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/mask_safe.go new file mode 100644 index 00000000..2aac060e --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/mask_safe.go @@ -0,0 +1,15 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. + +// +build appengine + +package websocket + +func maskBytes(key [4]byte, pos int, b []byte) int { + for i := range b { + b[i] ^= key[pos&3] + pos++ + } + return pos & 3 +} diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/prepared.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/prepared.go new file mode 100644 index 00000000..1efffbd1 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/prepared.go @@ -0,0 +1,103 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bytes" + "net" + "sync" + "time" +) + +// PreparedMessage caches on the wire representations of a message payload. +// Use PreparedMessage to efficiently send a message payload to multiple +// connections. PreparedMessage is especially useful when compression is used +// because the CPU and memory expensive compression operation can be executed +// once for a given set of compression options. 
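Editor's note: PreparedMessage (declared just below) is aimed at fan-out; the expensive framing and optional compression happen once per connection configuration rather than once per write. A sketch of the intended pattern, assuming the caller maintains its own slice of connections:

```go
package wsexample

import "github.com/gorilla/websocket"

// broadcast frames (and, when negotiated, compresses) the payload once per
// connection configuration and reuses the result for every connection.
func broadcast(conns []*websocket.Conn, payload []byte) error {
	pm, err := websocket.NewPreparedMessage(websocket.TextMessage, payload)
	if err != nil {
		return err
	}
	for _, c := range conns {
		if err := c.WritePreparedMessage(pm); err != nil {
			return err
		}
	}
	return nil
}
```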
+type PreparedMessage struct { + messageType int + data []byte + err error + mu sync.Mutex + frames map[prepareKey]*preparedFrame +} + +// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage. +type prepareKey struct { + isServer bool + compress bool + compressionLevel int +} + +// preparedFrame contains data in wire representation. +type preparedFrame struct { + once sync.Once + data []byte +} + +// NewPreparedMessage returns an initialized PreparedMessage. You can then send +// it to connection using WritePreparedMessage method. Valid wire +// representation will be calculated lazily only once for a set of current +// connection options. +func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) { + pm := &PreparedMessage{ + messageType: messageType, + frames: make(map[prepareKey]*preparedFrame), + data: data, + } + + // Prepare a plain server frame. + _, frameData, err := pm.frame(prepareKey{isServer: true, compress: false}) + if err != nil { + return nil, err + } + + // To protect against caller modifying the data argument, remember the data + // copied to the plain server frame. + pm.data = frameData[len(frameData)-len(data):] + return pm, nil +} + +func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) { + pm.mu.Lock() + frame, ok := pm.frames[key] + if !ok { + frame = &preparedFrame{} + pm.frames[key] = frame + } + pm.mu.Unlock() + + var err error + frame.once.Do(func() { + // Prepare a frame using a 'fake' connection. + // TODO: Refactor code in conn.go to allow more direct construction of + // the frame. + mu := make(chan bool, 1) + mu <- true + var nc prepareConn + c := &Conn{ + conn: &nc, + mu: mu, + isServer: key.isServer, + compressionLevel: key.compressionLevel, + enableWriteCompression: true, + writeBuf: make([]byte, defaultWriteBufferSize+maxFrameHeaderSize), + } + if key.compress { + c.newCompressionWriter = compressNoContextTakeover + } + err = c.WriteMessage(pm.messageType, pm.data) + frame.data = nc.buf.Bytes() + }) + return pm.messageType, frame.data, err +} + +type prepareConn struct { + buf bytes.Buffer + net.Conn +} + +func (pc *prepareConn) Write(p []byte) (int, error) { return pc.buf.Write(p) } +func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil } diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/server.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/server.go index 8402d20b..6ae97c54 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/server.go +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/server.go @@ -28,8 +28,9 @@ type Upgrader struct { HandshakeTimeout time.Duration // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer - // size is zero, then a default value of 4096 is used. The I/O buffer sizes - // do not limit the size of the messages that can be sent or received. + // size is zero, then buffers allocated by the HTTP server are used. The + // I/O buffer sizes do not limit the size of the messages that can be sent + // or received. ReadBufferSize, WriteBufferSize int // Subprotocols specifies the server's supported protocols in order of @@ -46,6 +47,12 @@ type Upgrader struct { // CheckOrigin is nil, the host in the Origin header must not be set or // must match the host of the request. CheckOrigin func(r *http.Request) bool + + // EnableCompression specify if the server should attempt to negotiate per + // message compression (RFC 7692). 
Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool } func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { @@ -98,18 +105,23 @@ func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header // response. func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { if r.Method != "GET" { - return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: method not GET") + return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET") } - if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: version != 13") + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported") } if !tokenListContainsValue(r.Header, "Connection", "upgrade") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find connection header with token 'upgrade'") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header") } if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { - return u.returnError(w, r, http.StatusBadRequest, "websocket: could not find upgrade header with token 'websocket'") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") } checkOrigin := u.CheckOrigin @@ -117,19 +129,30 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade checkOrigin = checkSameOrigin } if !checkOrigin(r) { - return u.returnError(w, r, http.StatusForbidden, "websocket: origin not allowed") + return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed") } challengeKey := r.Header.Get("Sec-Websocket-Key") if challengeKey == "" { - return u.returnError(w, r, http.StatusBadRequest, "websocket: key missing or blank") + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank") } subprotocol := u.selectSubprotocol(r, responseHeader) + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + var ( netConn net.Conn - br *bufio.Reader err error ) @@ -137,21 +160,25 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade if !ok { return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") } - var rw *bufio.ReadWriter - netConn, rw, err = h.Hijack() + var brw *bufio.ReadWriter + netConn, brw, err = h.Hijack() if err != nil { return u.returnError(w, r, http.StatusInternalServerError, err.Error()) } - br = rw.Reader - if br.Buffered() > 0 { + if brw.Reader.Buffered() > 0 { netConn.Close() return nil, errors.New("websocket: client sent 
data before handshake is complete") } - c := newConn(netConn, true, u.ReadBufferSize, u.WriteBufferSize) + c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw) c.subprotocol = subprotocol + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + p := c.writeBuf[:0] p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) p = append(p, computeAcceptKey(challengeKey)...) @@ -161,6 +188,9 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade p = append(p, c.subprotocol...) p = append(p, "\r\n"...) } + if compress { + p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } for k, vs := range responseHeader { if k == "Sec-Websocket-Protocol" { continue @@ -200,10 +230,11 @@ func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeade // Upgrade upgrades the HTTP server connection to the WebSocket protocol. // -// This function is deprecated, use websocket.Upgrader instead. +// Deprecated: Use websocket.Upgrader instead. // -// The application is responsible for checking the request origin before -// calling Upgrade. An example implementation of the same origin policy is: +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: // // if req.Header.Get("Origin") != "http://"+req.Host { // http.Error(w, "Origin not allowed", 403) diff --git a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/util.go b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/util.go index 9a4908df..262e647b 100644 --- a/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/util.go +++ b/src/stackdriver-nozzle/vendor/github.com/gorilla/websocket/util.go @@ -111,14 +111,14 @@ func nextTokenOrQuoted(s string) (value string, rest string) { case escape: escape = false p[j] = b - j += 1 + j++ case b == '\\': escape = true case b == '"': return string(p[:j]), s[i+1:] default: p[j] = b - j += 1 + j++ } } return "", "" diff --git a/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/README.md b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/README.md index 05e6900c..3e8fdc69 100644 --- a/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/README.md +++ b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/README.md @@ -1,6 +1,6 @@ # envconfig -[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.png)](https://travis-ci.org/kelseyhightower/envconfig) +[![Build Status](https://travis-ci.org/kelseyhightower/envconfig.svg)](https://travis-ci.org/kelseyhightower/envconfig) ```Go import "github.com/kelseyhightower/envconfig" @@ -21,6 +21,7 @@ export MYAPP_USER=Kelsey export MYAPP_RATE="0.5" export MYAPP_TIMEOUT="3m" export MYAPP_USERS="rob,ken,robert" +export MYAPP_COLORCODES="red:1,green:2,blue:3" ``` Write some code: @@ -37,12 +38,13 @@ import ( ) type Specification struct { - Debug bool - Port int - User string - Users []string - Rate float32 - Timeout time.Duration + Debug bool + Port int + User string + Users []string + Rate float32 + Timeout time.Duration + ColorCodes map[string]int } func main() { @@ -52,7 +54,7 @@ func main() { 
log.Fatal(err.Error()) } format := "Debug: %v\nPort: %d\nUser: %s\nRate: %f\nTimeout: %s\n" - _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate) + _, err = fmt.Printf(format, s.Debug, s.Port, s.User, s.Rate, s.Timeout) if err != nil { log.Fatal(err.Error()) } @@ -61,6 +63,11 @@ func main() { for _, u := range s.Users { fmt.Printf(" %s\n", u) } + + fmt.Println("Color codes:") + for k, v := range s.ColorCodes { + fmt.Printf(" %s: %d\n", k, v) + } } ``` @@ -76,6 +83,10 @@ Users: rob ken robert +Color codes: + red: 1 + green: 2 + blue: 3 ``` ## Struct Tag Support @@ -87,20 +98,30 @@ For example, consider the following struct: ```Go type Specification struct { - MultiWordVar string `envconfig:"multi_word_var"` - DefaultVar string `default:"foobar"` - RequiredVar string `required:"true"` - IgnoredVar string `ignored:"true"` + ManualOverride1 string `envconfig:"manual_override_1"` + DefaultVar string `default:"foobar"` + RequiredVar string `required:"true"` + IgnoredVar string `ignored:"true"` + AutoSplitVar string `split_words:"true"` } ``` -Envconfig will process value for `MultiWordVar` by populating it with the -value for `MYAPP_MULTI_WORD_VAR`. +Envconfig has automatic support for CamelCased struct elements when the +`split_words:"true"` tag is supplied. Without this tag, `AutoSplitVar` above +would look for an environment variable called `MYAPP_AUTOSPLITVAR`. With the +setting applied it will look for `MYAPP_AUTO_SPLIT_VAR`. Note that numbers +will get globbed into the previous word. If the setting does not do the +right thing, you may use a manual override. + +Envconfig will process value for `ManualOverride1` by populating it with the +value for `MYAPP_MANUAL_OVERRIDE_1`. Without this struct tag, it would have +instead looked up `MYAPP_MANUALOVERRIDE1`. With the `split_words:"true"` tag +it would have looked up `MYAPP_MANUAL_OVERRIDE1`. ```Bash -export MYAPP_MULTI_WORD_VAR="this will be the value" +export MYAPP_MANUAL_OVERRIDE_1="this will be the value" -# export MYAPP_MULTIWORDVAR="and this will not" +# export MYAPP_MANUALOVERRIDE1="and this will not" ``` If envconfig can't find an environment variable value for `MYAPP_DEFAULTVAR`, @@ -135,6 +156,8 @@ envconfig supports supports these struct field types: * int8, int16, int32, int64 * bool * float32, float64 + * slices of any supported type + * maps (keys and values of any supported type) * [encoding.TextUnmarshaler](https://golang.org/pkg/encoding/#TextUnmarshaler) Embedded structs using these fields are also supported. 
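To make the newly documented `split_words` and map support concrete, here is a minimal, hypothetical sketch (the struct, its field names, and the `myapp` prefix are illustrative only, and the import path assumes the upstream package location):

```go
package main

import (
	"fmt"
	"log"

	"github.com/kelseyhightower/envconfig"
)

// Settings is a hypothetical spec struct exercising the tags described above.
type Settings struct {
	// Read from MYAPP_AUTO_SPLIT_VAR because of split_words;
	// without the tag it would be MYAPP_AUTOSPLITVAR.
	AutoSplitVar string `split_words:"true"`

	// Parsed from a "red:1,green:2,blue:3" style value into a map.
	ColorCodes map[string]int

	// Used when MYAPP_DEFAULTVAR is not set in the environment.
	DefaultVar string `default:"foobar"`
}

func main() {
	var s Settings
	if err := envconfig.Process("myapp", &s); err != nil {
		log.Fatal(err.Error())
	}
	fmt.Printf("%+v\n", s)
}
```

With `MYAPP_COLORCODES="red:1,green:2,blue:3"` exported, `ColorCodes` would be populated as a three-entry map, matching the README example above.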
diff --git a/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/env_os.go b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/env_os.go new file mode 100644 index 00000000..a6a014a2 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/env_os.go @@ -0,0 +1,7 @@ +// +build appengine + +package envconfig + +import "os" + +var lookupEnv = os.LookupEnv diff --git a/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/env_syscall.go b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/env_syscall.go new file mode 100644 index 00000000..9d98085b --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/env_syscall.go @@ -0,0 +1,7 @@ +// +build !appengine + +package envconfig + +import "syscall" + +var lookupEnv = syscall.Getenv diff --git a/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/envconfig.go b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/envconfig.go index 7d16fe3c..a2e00b4f 100644 --- a/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/envconfig.go +++ b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/envconfig.go @@ -9,9 +9,9 @@ import ( "errors" "fmt" "reflect" + "regexp" "strconv" "strings" - "syscall" "time" ) @@ -44,22 +44,35 @@ func (e *ParseError) Error() string { return fmt.Sprintf("envconfig.Process: assigning %[1]s to %[2]s: converting '%[3]s' to type %[4]s. details: %[5]s", e.KeyName, e.FieldName, e.Value, e.TypeName, e.Err) } -// Process populates the specified struct based on environment variables -func Process(prefix string, spec interface{}) error { +// varInfo maintains information about the configuration variable +type varInfo struct { + Name string + Alt string + Key string + Field reflect.Value + Tags reflect.StructTag +} + +// GatherInfo gathers information about the specified struct +func gatherInfo(prefix string, spec interface{}) ([]varInfo, error) { + expr := regexp.MustCompile("([^A-Z]+|[A-Z][^A-Z]+|[A-Z]+)") s := reflect.ValueOf(spec) if s.Kind() != reflect.Ptr { - return ErrInvalidSpecification + return nil, ErrInvalidSpecification } s = s.Elem() if s.Kind() != reflect.Struct { - return ErrInvalidSpecification + return nil, ErrInvalidSpecification } typeOfSpec := s.Type() + + // over allocate an info array, we will extend if needed later + infos := make([]varInfo, 0, s.NumField()) for i := 0; i < s.NumField(); i++ { f := s.Field(i) ftype := typeOfSpec.Field(i) - if !f.CanSet() || ftype.Tag.Get("ignored") == "true" { + if !f.CanSet() || isTrue(ftype.Tag.Get("ignored")) { continue } @@ -75,70 +88,101 @@ func Process(prefix string, spec interface{}) error { f = f.Elem() } - alt := ftype.Tag.Get("envconfig") - fieldName := ftype.Name - if alt != "" { - fieldName = alt + // Capture information about the config variable + info := varInfo{ + Name: ftype.Name, + Field: f, + Tags: ftype.Tag, + Alt: strings.ToUpper(ftype.Tag.Get("envconfig")), } - key := fieldName + // Default to the field name as the env var name (will be upcased) + info.Key = info.Name + + // Best effort to un-pick camel casing as separate words + if isTrue(ftype.Tag.Get("split_words")) { + words := expr.FindAllStringSubmatch(ftype.Name, -1) + if len(words) > 0 { + var name []string + for _, words := range words { + name = append(name, words[0]) + } + + info.Key = strings.Join(name, "_") + } + } + if info.Alt != "" { + 
info.Key = info.Alt + } if prefix != "" { - key = fmt.Sprintf("%s_%s", prefix, key) + info.Key = fmt.Sprintf("%s_%s", prefix, info.Key) } - key = strings.ToUpper(key) + info.Key = strings.ToUpper(info.Key) + infos = append(infos, info) if f.Kind() == reflect.Struct { // honor Decode if present if decoderFrom(f) == nil && setterFrom(f) == nil && textUnmarshaler(f) == nil { innerPrefix := prefix if !ftype.Anonymous { - innerPrefix = key + innerPrefix = info.Key } embeddedPtr := f.Addr().Interface() - if err := Process(innerPrefix, embeddedPtr); err != nil { - return err + embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr) + if err != nil { + return nil, err } - f.Set(reflect.ValueOf(embeddedPtr).Elem()) + infos = append(infos[:len(infos)-1], embeddedInfos...) continue } } + } + return infos, nil +} + +// Process populates the specified struct based on environment variables +func Process(prefix string, spec interface{}) error { + infos, err := gatherInfo(prefix, spec) + + for _, info := range infos { // `os.Getenv` cannot differentiate between an explicitly set empty value // and an unset value. `os.LookupEnv` is preferred to `syscall.Getenv`, - // but it is only available in go1.5 or newer. - value, ok := syscall.Getenv(key) - if !ok && alt != "" { - key := strings.ToUpper(fieldName) - value, ok = syscall.Getenv(key) + // but it is only available in go1.5 or newer. We're using Go build tags + // here to use os.LookupEnv for >=go1.5 + value, ok := lookupEnv(info.Key) + if !ok && info.Alt != "" { + value, ok = lookupEnv(info.Alt) } - def := ftype.Tag.Get("default") + def := info.Tags.Get("default") if def != "" && !ok { value = def } - req := ftype.Tag.Get("required") + req := info.Tags.Get("required") if !ok && def == "" { - if req == "true" { - return fmt.Errorf("required key %s missing value", key) + if isTrue(req) { + return fmt.Errorf("required key %s missing value", info.Key) } continue } - err := processField(value, f) + err := processField(value, info.Field) if err != nil { return &ParseError{ - KeyName: key, - FieldName: fieldName, - TypeName: f.Type().String(), + KeyName: info.Key, + FieldName: info.Name, + TypeName: info.Field.Type().String(), Value: value, Err: err, } } } - return nil + + return err } // MustProcess is the same as Process but panics if an error occurs @@ -221,6 +265,27 @@ func processField(value string, field reflect.Value) error { } } field.Set(sl) + case reflect.Map: + pairs := strings.Split(value, ",") + mp := reflect.MakeMap(typ) + for _, pair := range pairs { + kvpair := strings.Split(pair, ":") + if len(kvpair) != 2 { + return fmt.Errorf("invalid map item: %q", pair) + } + k := reflect.New(typ.Key()).Elem() + err := processField(kvpair[0], k) + if err != nil { + return err + } + v := reflect.New(typ.Elem()).Elem() + err = processField(kvpair[1], v) + if err != nil { + return err + } + mp.SetMapIndex(k, v) + } + field.Set(mp) } return nil @@ -252,3 +317,8 @@ func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) { interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) }) return t } + +func isTrue(s string) bool { + b, _ := strconv.ParseBool(s) + return b +} diff --git a/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/usage.go b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/usage.go new file mode 100644 index 00000000..089f8c8a --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/kelseyhightower/envconfig/usage.go @@ -0,0 +1,158 @@ 
+// Copyright (c) 2016 Kelsey Hightower and others. All rights reserved. +// Use of this source code is governed by the MIT License that can be found in +// the LICENSE file. + +package envconfig + +import ( + "encoding" + "fmt" + "io" + "os" + "reflect" + "strconv" + "strings" + "text/tabwriter" + "text/template" +) + +const ( + // DefaultListFormat constant to use to display usage in a list format + DefaultListFormat = `This application is configured via the environment. The following environment +variables can be used: +{{range .}} +{{usage_key .}} + [description] {{usage_description .}} + [type] {{usage_type .}} + [default] {{usage_default .}} + [required] {{usage_required .}}{{end}} +` + // DefaultTableFormat constant to use to display usage in a tabular format + DefaultTableFormat = `This application is configured via the environment. The following environment +variables can be used: + +KEY TYPE DEFAULT REQUIRED DESCRIPTION +{{range .}}{{usage_key .}} {{usage_type .}} {{usage_default .}} {{usage_required .}} {{usage_description .}} +{{end}}` +) + +var ( + decoderType = reflect.TypeOf((*Decoder)(nil)).Elem() + setterType = reflect.TypeOf((*Setter)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +func implementsInterface(t reflect.Type) bool { + return t.Implements(decoderType) || + reflect.PtrTo(t).Implements(decoderType) || + t.Implements(setterType) || + reflect.PtrTo(t).Implements(setterType) || + t.Implements(unmarshalerType) || + reflect.PtrTo(t).Implements(unmarshalerType) +} + +// toTypeDescription converts Go types into a human readable description +func toTypeDescription(t reflect.Type) string { + switch t.Kind() { + case reflect.Array, reflect.Slice: + return fmt.Sprintf("Comma-separated list of %s", toTypeDescription(t.Elem())) + case reflect.Map: + return fmt.Sprintf( + "Comma-separated list of %s:%s pairs", + toTypeDescription(t.Key()), + toTypeDescription(t.Elem()), + ) + case reflect.Ptr: + return toTypeDescription(t.Elem()) + case reflect.Struct: + if implementsInterface(t) && t.Name() != "" { + return t.Name() + } + return "" + case reflect.String: + name := t.Name() + if name != "" && name != "string" { + return name + } + return "String" + case reflect.Bool: + name := t.Name() + if name != "" && name != "bool" { + return name + } + return "True or False" + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + name := t.Name() + if name != "" && !strings.HasPrefix(name, "int") { + return name + } + return "Integer" + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + name := t.Name() + if name != "" && !strings.HasPrefix(name, "uint") { + return name + } + return "Unsigned Integer" + case reflect.Float32, reflect.Float64: + name := t.Name() + if name != "" && !strings.HasPrefix(name, "float") { + return name + } + return "Float" + } + return fmt.Sprintf("%+v", t) +} + +// Usage writes usage information to stderr using the default header and table format +func Usage(prefix string, spec interface{}) error { + // The default is to output the usage information as a table + // Create tabwriter instance to support table output + tabs := tabwriter.NewWriter(os.Stdout, 1, 0, 4, ' ', 0) + + err := Usagef(prefix, spec, tabs, DefaultTableFormat) + tabs.Flush() + return err +} + +// Usagef writes usage information to the specified io.Writer using the specifed template specification +func Usagef(prefix string, spec interface{}, out io.Writer, format string) error { + + // 
Specify the default usage template functions + functions := template.FuncMap{ + "usage_key": func(v varInfo) string { return v.Key }, + "usage_description": func(v varInfo) string { return v.Tags.Get("desc") }, + "usage_type": func(v varInfo) string { return toTypeDescription(v.Field.Type()) }, + "usage_default": func(v varInfo) string { return v.Tags.Get("default") }, + "usage_required": func(v varInfo) (string, error) { + req := v.Tags.Get("required") + if req != "" { + reqB, err := strconv.ParseBool(req) + if err != nil { + return "", err + } + if reqB { + req = "true" + } + } + return req, nil + }, + } + + tmpl, err := template.New("envconfig").Funcs(functions).Parse(format) + if err != nil { + return err + } + + return Usaget(prefix, spec, out, tmpl) +} + +// Usaget writes usage information to the specified io.Writer using the specified template +func Usaget(prefix string, spec interface{}, out io.Writer, tmpl *template.Template) error { + // gather first + infos, err := gatherInfo(prefix, spec) + if err != nil { + return err + } + + return tmpl.Execute(out, infos) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/LICENSE b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/LICENSE new file mode 100644 index 00000000..fbff658f --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
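The `usage.go` file added above feeds `gatherInfo` results into `text/template`; a minimal sketch of how an application might surface that usage output when configuration is invalid (the `Spec` struct, its `desc` tags, and the `myapp` prefix are assumptions for illustration):

```go
package main

import (
	"log"

	"github.com/kelseyhightower/envconfig"
)

// Spec is a hypothetical configuration struct; the desc tags feed the
// usage_description template function defined in usage.go.
type Spec struct {
	Port   int    `default:"8080" desc:"Port to listen on"`
	APIKey string `required:"true" desc:"Key used to authenticate requests"`
}

func main() {
	var s Spec
	if err := envconfig.Process("myapp", &s); err != nil {
		// Print the generated usage table before exiting on bad configuration.
		_ = envconfig.Usage("myapp", &s)
		log.Fatal(err.Error())
	}
}
```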
diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/Makefile b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/Makefile new file mode 100644 index 00000000..ea591b0c --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/Makefile @@ -0,0 +1,55 @@ +PKG=github.com/mailru/easyjson +GOPATH:=$(PWD)/.root:$(GOPATH) +export GOPATH + +all: test + +.root/src/$(PKG): + mkdir -p $@ + for i in $$PWD/* ; do ln -s $$i $@/`basename $$i` ; done + +root: .root/src/$(PKG) + +clean: + rm -rf .root + +build: + go build -i -o .root/bin/easyjson $(PKG)/easyjson + +generate: root build + .root/bin/easyjson -stubs \ + .root/src/$(PKG)/tests/snake.go \ + .root/src/$(PKG)/tests/data.go \ + .root/src/$(PKG)/tests/omitempty.go \ + .root/src/$(PKG)/tests/nothing.go \ + .root/src/$(PKG)/tests/named_type.go + + .root/bin/easyjson -all .root/src/$(PKG)/tests/data.go + .root/bin/easyjson -all .root/src/$(PKG)/tests/nothing.go + .root/bin/easyjson -all .root/src/$(PKG)/tests/errors.go + .root/bin/easyjson -snake_case .root/src/$(PKG)/tests/snake.go + .root/bin/easyjson -omit_empty .root/src/$(PKG)/tests/omitempty.go + .root/bin/easyjson -build_tags=use_easyjson .root/src/$(PKG)/benchmark/data.go + .root/bin/easyjson .root/src/$(PKG)/tests/nested_easy.go + .root/bin/easyjson .root/src/$(PKG)/tests/named_type.go + +test: generate root + go test \ + $(PKG)/tests \ + $(PKG)/jlexer \ + $(PKG)/gen \ + $(PKG)/buffer + go test -benchmem -tags use_easyjson -bench . $(PKG)/benchmark + golint -set_exit_status .root/src/$(PKG)/tests/*_easyjson.go + +bench-other: generate root + @go test -benchmem -bench . $(PKG)/benchmark + @go test -benchmem -tags use_ffjson -bench . $(PKG)/benchmark + @go test -benchmem -tags use_jsoniter -bench . $(PKG)/benchmark + @go test -benchmem -tags use_codec -bench . $(PKG)/benchmark + +bench-python: + benchmark/ujson.sh + + +.PHONY: root clean generate test build diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/README.md b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/README.md new file mode 100644 index 00000000..9366e3f7 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/README.md @@ -0,0 +1,331 @@ +# easyjson [![Build Status](https://travis-ci.org/mailru/easyjson.svg?branch=master)](https://travis-ci.org/mailru/easyjson) [![Go Report Card](https://goreportcard.com/badge/github.com/mailru/easyjson)](https://goreportcard.com/report/github.com/mailru/easyjson) + +Package easyjson provides a fast and easy way to marshal/unmarshal Go structs +to/from JSON without the use of reflection. In performance tests, easyjson +outperforms the standard `encoding/json` package by a factor of 4-5x, and other +JSON encoding packages by a factor of 2-3x. + +easyjson aims to keep generated Go code simple enough so that it can be easily +optimized or fixed. Another goal is to provide users with the ability to +customize the generated code by providing options not available with the +standard `encoding/json` package, such as generating "snake_case" names or +enabling `omitempty` behavior by default. + +## Usage +```sh +# install +go get -u github.com/mailru/easyjson/... + +# run +easyjson -all .go +``` + +The above will generate `_easyjson.go` containing the appropriate marshaler and +unmarshaler funcs for all structs contained in `.go`. 
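As a hedged illustration of the generate-then-marshal workflow described above (the type and file names are hypothetical, and the snippet assumes the generator has already been run so that the type satisfies `easyjson.Marshaler` and `easyjson.Unmarshaler`):

```go
package main

import (
	"fmt"
	"log"

	"github.com/mailru/easyjson"
)

//easyjson:json
type User struct {
	ID   int    `json:"id"`
	Name string `json:"name"`
}

func main() {
	// Assumes `easyjson -all user.go` has already generated user_easyjson.go,
	// so that *User satisfies easyjson.Marshaler and easyjson.Unmarshaler.
	u := User{ID: 1, Name: "alice"}

	raw, err := easyjson.Marshal(&u)
	if err != nil {
		log.Fatal(err)
	}

	var decoded User
	if err := easyjson.Unmarshal(raw, &decoded); err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(raw), decoded.Name)
}
```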
+ +Please note that easyjson requires a full Go build environment and the `GOPATH` +environment variable to be set. This is because easyjson code generation +invokes `go run` on a temporary file (an approach to code generation borrowed +from [ffjson](https://github.com/pquerna/ffjson)). + +## Options +```txt +Usage of easyjson: + -all + generate marshaler/unmarshalers for all structs in a file + -build_tags string + build tags to add to generated file + -leave_temps + do not delete temporary files + -no_std_marshalers + don't generate MarshalJSON/UnmarshalJSON funcs + -noformat + do not run 'gofmt -w' on output file + -omit_empty + omit empty fields by default + -output_filename string + specify the filename of the output + -pkg + process the whole package instead of just the given file + -snake_case + use snake_case names instead of CamelCase by default + -lower_camel_case + use lowerCamelCase instead of CamelCase by default + -stubs + only generate stubs for marshaler/unmarshaler funcs +``` + +Using `-all` will generate marshalers/unmarshalers for all Go structs in the +file. If `-all` is not provided, then only those structs whose preceding +comment starts with `easyjson:json` will have marshalers/unmarshalers +generated. For example: + +```go +//easyjson:json +type A struct {} +``` + +Additional option notes: + +* `-snake_case` tells easyjson to generate snake\_case field names by default + (unless overridden by a field tag). The CamelCase to snake\_case conversion + algorithm should work in most cases (ie, HTTPVersion will be converted to + "http_version"). + +* `-build_tags` will add the specified build tags to generated Go sources. + +## Generated Marshaler/Unmarshaler Funcs + +For Go struct types, easyjson generates the funcs `MarshalEasyJSON` / +`UnmarshalEasyJSON` for marshaling/unmarshaling JSON. In turn, these satisify +the `easyjson.Marshaler` and `easyjson.Unmarshaler` interfaces and when used in +conjunction with `easyjson.Marshal` / `easyjson.Unmarshal` avoid unnecessary +reflection / type assertions during marshaling/unmarshaling to/from JSON for Go +structs. + +easyjson also generates `MarshalJSON` and `UnmarshalJSON` funcs for Go struct +types compatible with the standard `json.Marshaler` and `json.Unmarshaler` +interfaces. Please be aware that using the standard `json.Marshal` / +`json.Unmarshal` for marshaling/unmarshaling will incur a significant +performance penalty when compared to using `easyjson.Marshal` / +`easyjson.Unmarshal`. + +Additionally, easyjson exposes utility funcs that use the `MarshalEasyJSON` and +`UnmarshalEasyJSON` for marshaling/unmarshaling to and from standard readers +and writers. For example, easyjson provides `easyjson.MarshalToHTTPResponseWriter` +which marshals to the standard `http.ResponseWriter`. Please see the [GoDoc +listing](https://godoc.org/github.com/mailru/easyjson) for the full listing of +utility funcs that are available. + +## Controlling easyjson Marshaling and Unmarshaling Behavior + +Go types can provide their own `MarshalEasyJSON` and `UnmarshalEasyJSON` funcs +that satisify the `easyjson.Marshaler` / `easyjson.Unmarshaler` interfaces. +These will be used by `easyjson.Marshal` and `easyjson.Unmarshal` when defined +for a Go type. + +Go types can also satisify the `easyjson.Optional` interface, which allows the +type to define its own `omitempty` logic. + +## Type Wrappers + +easyjson provides additional type wrappers defined in the `easyjson/opt` +package. 
These wrap the standard Go primitives and in turn satisify the +easyjson interfaces. + +The `easyjson/opt` type wrappers are useful when needing to distinguish between +a missing value and/or when needing to specifying a default value. Type +wrappers allow easyjson to avoid additional pointers and heap allocations and +can significantly increase performance when used properly. + +## Memory Pooling + +easyjson uses a buffer pool that allocates data in increasing chunks from 128 +to 32768 bytes. Chunks of 512 bytes and larger will be reused with the help of +`sync.Pool`. The maximum size of a chunk is bounded to reduce redundant memory +allocation and to allow larger reusable buffers. + +easyjson's custom allocation buffer pool is defined in the `easyjson/buffer` +package, and the default behavior pool behavior can be modified (if necessary) +through a call to `buffer.Init()` prior to any marshaling or unmarshaling. +Please see the [GoDoc listing](https://godoc.org/github.com/mailru/easyjson/buffer) +for more information. + +## Issues, Notes, and Limitations + +* easyjson is still early in its development. As such, there are likely to be + bugs and missing features when compared to `encoding/json`. In the case of a + missing feature or bug, please create a GitHub issue. Pull requests are + welcome! + +* Unlike `encoding/json`, object keys are case-sensitive. Case-insensitive + matching is not currently provided due to the significant performance hit + when doing case-insensitive key matching. In the future, case-insensitive + object key matching may be provided via an option to the generator. + +* easyjson makes use of `unsafe`, which simplifies the code and + provides significant performance benefits by allowing no-copy + conversion from `[]byte` to `string`. That said, `unsafe` is used + only when unmarshaling and parsing JSON, and any `unsafe` operations + / memory allocations done will be safely deallocated by + easyjson. Set the build tag `easyjson_nounsafe` to compile it + without `unsafe`. + +* easyjson is compatible with Google App Engine. The `appengine` build + tag (set by App Engine's environment) will automatically disable the + use of `unsafe`, which is not allowed in App Engine's Standard + Environment. Note that the use with App Engine is still experimental. + +* Floats are formatted using the default precision from Go's `strconv` package. + As such, easyjson will not correctly handle high precision floats when + marshaling/unmarshaling JSON. Note, however, that there are very few/limited + uses where this behavior is not sufficient for general use. That said, a + different package may be needed if precise marshaling/unmarshaling of high + precision floats to/from JSON is required. + +* While unmarshaling, the JSON parser does the minimal amount of work needed to + skip over unmatching parens, and as such full validation is not done for the + entire JSON value being unmarshaled/parsed. + +* Currently there is no true streaming support for encoding/decoding as + typically for many uses/protocols the final, marshaled length of the JSON + needs to be known prior to sending the data. Currently this is not possible + with easyjson's architecture. + +## Benchmarks + +Most benchmarks were done using the example +[13kB example JSON](https://dev.twitter.com/rest/reference/get/search/tweets) +(9k after eliminating whitespace). 
This example is similar to real-world data, +is well-structured, and contains a healthy variety of different types, making +it ideal for JSON serialization benchmarks. + +Note: + +* For small request benchmarks, an 80 byte portion of the above example was + used. + +* For large request marshaling benchmarks, a struct containing 50 regular + samples was used, making a ~500kB output JSON. + +* Benchmarks are showing the results of easyjson's default behaviour, + which makes use of `unsafe`. + +Benchmarks are available in the repository and can be run by invoking `make`. + +### easyjson vs. encoding/json + +easyjson is roughly 5-6 times faster than the standard `encoding/json` for +unmarshaling, and 3-4 times faster for non-concurrent marshaling. Concurrent +marshaling is 6-7x faster if marshaling to a writer. + +### easyjson vs. ffjson + +easyjson uses the same approach for JSON marshaling as +[ffjson](https://github.com/pquerna/ffjson), but takes a significantly +different approach to lexing and parsing JSON during unmarshaling. This means +easyjson is roughly 2-3x faster for unmarshaling and 1.5-2x faster for +non-concurrent unmarshaling. + +As of this writing, `ffjson` seems to have issues when used concurrently: +specifically, large request pooling hurts `ffjson`'s performance and causes +scalability issues. These issues with `ffjson` can likely be fixed, but as of +writing remain outstanding/known issues with `ffjson`. + +easyjson and `ffjson` have similar performance for small requests, however +easyjson outperforms `ffjson` by roughly 2-5x times for large requests when +used with a writer. + +### easyjson vs. go/codec + +[go/codec](https://github.com/ugorji/go) provides +compile-time helpers for JSON generation. In this case, helpers do not work +like marshalers as they are encoding-independent. + +easyjson is generally 2x faster than `go/codec` for non-concurrent benchmarks +and about 3x faster for concurrent encoding (without marshaling to a writer). + +In an attempt to measure marshaling performance of `go/codec` (as opposed to +allocations/memcpy/writer interface invocations), a benchmark was done with +resetting length of a byte slice rather than resetting the whole slice to nil. +However, the optimization in this exact form may not be applicable in practice, +since the memory is not freed between marshaling operations. + +### easyjson vs 'ujson' python module + +[ujson](https://github.com/esnme/ultrajson) is using C code for parsing, so it +is interesting to see how plain golang compares to that. It is imporant to note +that the resulting object for python is slower to access, since the library +parses JSON object into dictionaries. + +easyjson is slightly faster for unmarshaling and 2-3x faster than `ujson` for +marshaling. + +### Benchmark Results + +`ffjson` results are from February 4th, 2016, using the latest `ffjson` and go1.6. +`go/codec` results are from March 4th, 2016, using the latest `go/codec` and go1.6. 
+ +#### Unmarshaling + +| lib | json size | MB/s | allocs/op | B/op | +|:---------|:----------|-----:|----------:|------:| +| standard | regular | 22 | 218 | 10229 | +| standard | small | 9.7 | 14 | 720 | +| | | | | | +| easyjson | regular | 125 | 128 | 9794 | +| easyjson | small | 67 | 3 | 128 | +| | | | | | +| ffjson | regular | 66 | 141 | 9985 | +| ffjson | small | 17.6 | 10 | 488 | +| | | | | | +| codec | regular | 55 | 434 | 19299 | +| codec | small | 29 | 7 | 336 | +| | | | | | +| ujson | regular | 103 | N/A | N/A | + +#### Marshaling, one goroutine. + +| lib | json size | MB/s | allocs/op | B/op | +|:----------|:----------|-----:|----------:|------:| +| standard | regular | 75 | 9 | 23256 | +| standard | small | 32 | 3 | 328 | +| standard | large | 80 | 17 | 1.2M | +| | | | | | +| easyjson | regular | 213 | 9 | 10260 | +| easyjson* | regular | 263 | 8 | 742 | +| easyjson | small | 125 | 1 | 128 | +| easyjson | large | 212 | 33 | 490k | +| easyjson* | large | 262 | 25 | 2879 | +| | | | | | +| ffjson | regular | 122 | 153 | 21340 | +| ffjson** | regular | 146 | 152 | 4897 | +| ffjson | small | 36 | 5 | 384 | +| ffjson** | small | 64 | 4 | 128 | +| ffjson | large | 134 | 7317 | 818k | +| ffjson** | large | 125 | 7320 | 827k | +| | | | | | +| codec | regular | 80 | 17 | 33601 | +| codec*** | regular | 108 | 9 | 1153 | +| codec | small | 42 | 3 | 304 | +| codec*** | small | 56 | 1 | 48 | +| codec | large | 73 | 483 | 2.5M | +| codec*** | large | 103 | 451 | 66007 | +| | | | | | +| ujson | regular | 92 | N/A | N/A | + +\* marshaling to a writer, +\*\* using `ffjson.Pool()`, +\*\*\* reusing output slice instead of resetting it to nil + +#### Marshaling, concurrent. + +| lib | json size | MB/s | allocs/op | B/op | +|:----------|:----------|-----:|----------:|------:| +| standard | regular | 252 | 9 | 23257 | +| standard | small | 124 | 3 | 328 | +| standard | large | 289 | 17 | 1.2M | +| | | | | | +| easyjson | regular | 792 | 9 | 10597 | +| easyjson* | regular | 1748 | 8 | 779 | +| easyjson | small | 333 | 1 | 128 | +| easyjson | large | 718 | 36 | 548k | +| easyjson* | large | 2134 | 25 | 4957 | +| | | | | | +| ffjson | regular | 301 | 153 | 21629 | +| ffjson** | regular | 707 | 152 | 5148 | +| ffjson | small | 62 | 5 | 384 | +| ffjson** | small | 282 | 4 | 128 | +| ffjson | large | 438 | 7330 | 1.0M | +| ffjson** | large | 131 | 7319 | 820k | +| | | | | | +| codec | regular | 183 | 17 | 33603 | +| codec*** | regular | 671 | 9 | 1157 | +| codec | small | 147 | 3 | 304 | +| codec*** | small | 299 | 1 | 48 | +| codec | large | 190 | 483 | 2.5M | +| codec*** | large | 752 | 451 | 77574 | + +\* marshaling to a writer, +\*\* using `ffjson.Pool()`, +\*\*\* reusing output slice instead of resetting it to nil diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/buffer/pool.go b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/buffer/pool.go new file mode 100644 index 00000000..07fb4bc1 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/buffer/pool.go @@ -0,0 +1,270 @@ +// Package buffer implements a buffer for serialization, consisting of a chain of []byte-s to +// reduce copying and to allow reuse of individual chunks. +package buffer + +import ( + "io" + "sync" +) + +// PoolConfig contains configuration for the allocation and reuse strategy. +type PoolConfig struct { + StartSize int // Minimum chunk size that is allocated. 
+ PooledSize int // Minimum chunk size that is reused, reusing chunks too small will result in overhead. + MaxSize int // Maximum chunk size that will be allocated. +} + +var config = PoolConfig{ + StartSize: 128, + PooledSize: 512, + MaxSize: 32768, +} + +// Reuse pool: chunk size -> pool. +var buffers = map[int]*sync.Pool{} + +func initBuffers() { + for l := config.PooledSize; l <= config.MaxSize; l *= 2 { + buffers[l] = new(sync.Pool) + } +} + +func init() { + initBuffers() +} + +// Init sets up a non-default pooling and allocation strategy. Should be run before serialization is done. +func Init(cfg PoolConfig) { + config = cfg + initBuffers() +} + +// putBuf puts a chunk to reuse pool if it can be reused. +func putBuf(buf []byte) { + size := cap(buf) + if size < config.PooledSize { + return + } + if c := buffers[size]; c != nil { + c.Put(buf[:0]) + } +} + +// getBuf gets a chunk from reuse pool or creates a new one if reuse failed. +func getBuf(size int) []byte { + if size < config.PooledSize { + return make([]byte, 0, size) + } + + if c := buffers[size]; c != nil { + v := c.Get() + if v != nil { + return v.([]byte) + } + } + return make([]byte, 0, size) +} + +// Buffer is a buffer optimized for serialization without extra copying. +type Buffer struct { + + // Buf is the current chunk that can be used for serialization. + Buf []byte + + toPool []byte + bufs [][]byte +} + +// EnsureSpace makes sure that the current chunk contains at least s free bytes, +// possibly creating a new chunk. +func (b *Buffer) EnsureSpace(s int) { + if cap(b.Buf)-len(b.Buf) >= s { + return + } + l := len(b.Buf) + if l > 0 { + if cap(b.toPool) != cap(b.Buf) { + // Chunk was reallocated, toPool can be pooled. + putBuf(b.toPool) + } + if cap(b.bufs) == 0 { + b.bufs = make([][]byte, 0, 8) + } + b.bufs = append(b.bufs, b.Buf) + l = cap(b.toPool) * 2 + } else { + l = config.StartSize + } + + if l > config.MaxSize { + l = config.MaxSize + } + b.Buf = getBuf(l) + b.toPool = b.Buf +} + +// AppendByte appends a single byte to buffer. +func (b *Buffer) AppendByte(data byte) { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + b.Buf = append(b.Buf, data) +} + +// AppendBytes appends a byte slice to buffer. +func (b *Buffer) AppendBytes(data []byte) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// AppendBytes appends a string to buffer. +func (b *Buffer) AppendString(data string) { + for len(data) > 0 { + if cap(b.Buf) == len(b.Buf) { // EnsureSpace won't be inlined. + b.EnsureSpace(1) + } + + sz := cap(b.Buf) - len(b.Buf) + if sz > len(data) { + sz = len(data) + } + + b.Buf = append(b.Buf, data[:sz]...) + data = data[sz:] + } +} + +// Size computes the size of a buffer by adding sizes of every chunk. +func (b *Buffer) Size() int { + size := len(b.Buf) + for _, buf := range b.bufs { + size += len(buf) + } + return size +} + +// DumpTo outputs the contents of a buffer to a writer and resets the buffer. 
+func (b *Buffer) DumpTo(w io.Writer) (written int, err error) { + var n int + for _, buf := range b.bufs { + if err == nil { + n, err = w.Write(buf) + written += n + } + putBuf(buf) + } + + if err == nil { + n, err = w.Write(b.Buf) + written += n + } + putBuf(b.toPool) + + b.bufs = nil + b.Buf = nil + b.toPool = nil + + return +} + +// BuildBytes creates a single byte slice with all the contents of the buffer. Data is +// copied if it does not fit in a single chunk. You can optionally provide one byte +// slice as argument that it will try to reuse. +func (b *Buffer) BuildBytes(reuse ...[]byte) []byte { + if len(b.bufs) == 0 { + ret := b.Buf + b.toPool = nil + b.Buf = nil + return ret + } + + var ret []byte + size := b.Size() + + // If we got a buffer as argument and it is big enought, reuse it. + if len(reuse) == 1 && cap(reuse[0]) >= size { + ret = reuse[0][:0] + } else { + ret = make([]byte, 0, size) + } + for _, buf := range b.bufs { + ret = append(ret, buf...) + putBuf(buf) + } + + ret = append(ret, b.Buf...) + putBuf(b.toPool) + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} + +type readCloser struct { + offset int + bufs [][]byte +} + +func (r *readCloser) Read(p []byte) (n int, err error) { + for _, buf := range r.bufs { + // Copy as much as we can. + x := copy(p[n:], buf[r.offset:]) + n += x // Increment how much we filled. + + // Did we empty the whole buffer? + if r.offset+x == len(buf) { + // On to the next buffer. + r.offset = 0 + r.bufs = r.bufs[1:] + + // We can release this buffer. + putBuf(buf) + } else { + r.offset += x + } + + if n == len(p) { + break + } + } + // No buffers left or nothing read? + if len(r.bufs) == 0 { + err = io.EOF + } + return +} + +func (r *readCloser) Close() error { + // Release all remaining buffers. + for _, buf := range r.bufs { + putBuf(buf) + } + // In case Close gets called multiple times. + r.bufs = nil + + return nil +} + +// ReadCloser creates an io.ReadCloser with all the contents of the buffer. +func (b *Buffer) ReadCloser() io.ReadCloser { + ret := &readCloser{0, append(b.bufs, b.Buf)} + + b.bufs = nil + b.toPool = nil + b.Buf = nil + + return ret +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/helpers.go b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/helpers.go new file mode 100644 index 00000000..b86b87d2 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/helpers.go @@ -0,0 +1,78 @@ +// Package easyjson contains marshaler/unmarshaler interfaces and helper functions. +package easyjson + +import ( + "io" + "io/ioutil" + "net/http" + "strconv" + + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// Marshaler is an easyjson-compatible marshaler interface. +type Marshaler interface { + MarshalEasyJSON(w *jwriter.Writer) +} + +// Marshaler is an easyjson-compatible unmarshaler interface. +type Unmarshaler interface { + UnmarshalEasyJSON(w *jlexer.Lexer) +} + +// Optional defines an undefined-test method for a type to integrate with 'omitempty' logic. +type Optional interface { + IsDefined() bool +} + +// Marshal returns data as a single byte slice. Method is suboptimal as the data is likely to be copied +// from a chain of smaller chunks. +func Marshal(v Marshaler) ([]byte, error) { + w := jwriter.Writer{} + v.MarshalEasyJSON(&w) + return w.BuildBytes() +} + +// MarshalToWriter marshals the data to an io.Writer. 
+func MarshalToWriter(v Marshaler, w io.Writer) (written int, err error) { + jw := jwriter.Writer{} + v.MarshalEasyJSON(&jw) + return jw.DumpTo(w) +} + +// MarshalToHTTPResponseWriter sets Content-Length and Content-Type headers for the +// http.ResponseWriter, and send the data to the writer. started will be equal to +// false if an error occurred before any http.ResponseWriter methods were actually +// invoked (in this case a 500 reply is possible). +func MarshalToHTTPResponseWriter(v Marshaler, w http.ResponseWriter) (started bool, written int, err error) { + jw := jwriter.Writer{} + v.MarshalEasyJSON(&jw) + if jw.Error != nil { + return false, 0, jw.Error + } + w.Header().Set("Content-Type", "application/json") + w.Header().Set("Content-Length", strconv.Itoa(jw.Size())) + + started = true + written, err = jw.DumpTo(w) + return +} + +// Unmarshal decodes the JSON in data into the object. +func Unmarshal(data []byte, v Unmarshaler) error { + l := jlexer.Lexer{Data: data} + v.UnmarshalEasyJSON(&l) + return l.Error() +} + +// UnmarshalFromReader reads all the data in the reader and decodes as JSON into the object. +func UnmarshalFromReader(r io.Reader, v Unmarshaler) error { + data, err := ioutil.ReadAll(r) + if err != nil { + return err + } + l := jlexer.Lexer{Data: data} + v.UnmarshalEasyJSON(&l) + return l.Error() +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go new file mode 100644 index 00000000..ff7b27c5 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/bytestostr.go @@ -0,0 +1,24 @@ +// This file will only be included to the build if neither +// easyjson_nounsafe nor appengine build tag is set. See README notes +// for more details. + +//+build !easyjson_nounsafe +//+build !appengine + +package jlexer + +import ( + "reflect" + "unsafe" +) + +// bytesToStr creates a string pointing at the slice to avoid copying. +// +// Warning: the string returned by the function should be used with care, as the whole input data +// chunk may be either blocked from being freed by GC because of a single string or the buffer.Data +// may be garbage-collected even when the string exists. +func bytesToStr(data []byte) string { + h := (*reflect.SliceHeader)(unsafe.Pointer(&data)) + shdr := reflect.StringHeader{Data: h.Data, Len: h.Len} + return *(*string)(unsafe.Pointer(&shdr)) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go new file mode 100644 index 00000000..864d1be6 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/bytestostr_nounsafe.go @@ -0,0 +1,13 @@ +// This file is included to the build if any of the buildtags below +// are defined. Refer to README notes for more details. + +//+build easyjson_nounsafe appengine + +package jlexer + +// bytesToStr creates a string normally from []byte +// +// Note that this method is roughly 1.5x slower than using the 'unsafe' method. 
+func bytesToStr(data []byte) string { + return string(data) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/error.go b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/error.go new file mode 100644 index 00000000..e90ec40d --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/error.go @@ -0,0 +1,15 @@ +package jlexer + +import "fmt" + +// LexerError implements the error interface and represents all possible errors that can be +// generated during parsing the JSON data. +type LexerError struct { + Reason string + Offset int + Data string +} + +func (l *LexerError) Error() string { + return fmt.Sprintf("parse error: %s near offset %d of '%s'", l.Reason, l.Offset, l.Data) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/lexer.go b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/lexer.go new file mode 100644 index 00000000..e81f1031 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jlexer/lexer.go @@ -0,0 +1,1114 @@ +// Package jlexer contains a JSON lexer implementation. +// +// It is expected that it is mostly used with generated parser code, so the interface is tuned +// for a parser that knows what kind of data is expected. +package jlexer + +import ( + "encoding/base64" + "errors" + "fmt" + "io" + "strconv" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// tokenKind determines type of a token. +type tokenKind byte + +const ( + tokenUndef tokenKind = iota // No token. + tokenDelim // Delimiter: one of '{', '}', '[' or ']'. + tokenString // A string literal, e.g. "abc\u1234" + tokenNumber // Number literal, e.g. 1.5e5 + tokenBool // Boolean literal: true or false. + tokenNull // null keyword. +) + +// token describes a single token: type, position in the input and value. +type token struct { + kind tokenKind // Type of a token. + + boolValue bool // Value if a boolean literal token. + byteValue []byte // Raw value of a token. + delimValue byte +} + +// Lexer is a JSON lexer: it iterates over JSON tokens in a byte slice. +type Lexer struct { + Data []byte // Input data given to the lexer. + + start int // Start of the current token. + pos int // Current unscanned position in the input stream. + token token // Last scanned token, if token.kind != tokenUndef. + + firstElement bool // Whether current element is the first in array or an object. + wantSep byte // A comma or a colon character, which need to occur before a token. + + UseMultipleErrors bool // If we want to use multiple errors. + fatalError error // Fatal error occurred during lexing. It is usually a syntax error. + multipleErrors []*LexerError // Semantic errors occurred during lexing. Marshalling will be continued after finding this errors. +} + +// FetchToken scans the input for the next token. +func (r *Lexer) FetchToken() { + r.token.kind = tokenUndef + r.start = r.pos + + // Check if r.Data has r.pos element + // If it doesn't, it mean corrupted input data + if len(r.Data) < r.pos { + r.errParse("Unexpected end of data") + return + } + // Determine the type of a token by skipping whitespace and reading the + // first character. 
+ for _, c := range r.Data[r.pos:] { + switch c { + case ':', ',': + if r.wantSep == c { + r.pos++ + r.start++ + r.wantSep = 0 + } else { + r.errSyntax() + } + + case ' ', '\t', '\r', '\n': + r.pos++ + r.start++ + + case '"': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenString + r.fetchString() + return + + case '{', '[': + if r.wantSep != 0 { + r.errSyntax() + } + r.firstElement = true + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '}', ']': + if !r.firstElement && (r.wantSep != ',') { + r.errSyntax() + } + r.wantSep = 0 + r.token.kind = tokenDelim + r.token.delimValue = r.Data[r.pos] + r.pos++ + return + + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-': + if r.wantSep != 0 { + r.errSyntax() + } + r.token.kind = tokenNumber + r.fetchNumber() + return + + case 'n': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenNull + r.fetchNull() + return + + case 't': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = true + r.fetchTrue() + return + + case 'f': + if r.wantSep != 0 { + r.errSyntax() + } + + r.token.kind = tokenBool + r.token.boolValue = false + r.fetchFalse() + return + + default: + r.errSyntax() + return + } + } + r.fatalError = io.EOF + return +} + +// isTokenEnd returns true if the char can follow a non-delimiter token +func isTokenEnd(c byte) bool { + return c == ' ' || c == '\t' || c == '\r' || c == '\n' || c == '[' || c == ']' || c == '{' || c == '}' || c == ',' || c == ':' +} + +// fetchNull fetches and checks remaining bytes of null keyword. +func (r *Lexer) fetchNull() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'u' || + r.Data[r.pos-2] != 'l' || + r.Data[r.pos-1] != 'l' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchTrue fetches and checks remaining bytes of true keyword. +func (r *Lexer) fetchTrue() { + r.pos += 4 + if r.pos > len(r.Data) || + r.Data[r.pos-3] != 'r' || + r.Data[r.pos-2] != 'u' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 4 + r.errSyntax() + } +} + +// fetchFalse fetches and checks remaining bytes of false keyword. +func (r *Lexer) fetchFalse() { + r.pos += 5 + if r.pos > len(r.Data) || + r.Data[r.pos-4] != 'a' || + r.Data[r.pos-3] != 'l' || + r.Data[r.pos-2] != 's' || + r.Data[r.pos-1] != 'e' || + (r.pos != len(r.Data) && !isTokenEnd(r.Data[r.pos])) { + + r.pos -= 5 + r.errSyntax() + } +} + +// fetchNumber scans a number literal token. +func (r *Lexer) fetchNumber() { + hasE := false + afterE := false + hasDot := false + + r.pos++ + for i, c := range r.Data[r.pos:] { + switch { + case c >= '0' && c <= '9': + afterE = false + case c == '.' && !hasDot: + hasDot = true + case (c == 'e' || c == 'E') && !hasE: + hasE = true + hasDot = true + afterE = true + case (c == '+' || c == '-') && afterE: + afterE = false + default: + r.pos += i + if !isTokenEnd(c) { + r.errSyntax() + } else { + r.token.byteValue = r.Data[r.start:r.pos] + } + return + } + } + + r.pos = len(r.Data) + r.token.byteValue = r.Data[r.start:] +} + +// findStringLen tries to scan into the string literal for ending quote char to determine required size. +// The size will be exact if no escapes are present and may be inexact if there are escaped chars. 
+func findStringLen(data []byte) (hasEscapes bool, length int) { + delta := 0 + + for i := 0; i < len(data); i++ { + switch data[i] { + case '\\': + i++ + delta++ + if i < len(data) && data[i] == 'u' { + delta++ + } + case '"': + return (delta > 0), (i - delta) + } + } + + return false, len(data) +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + var val rune + for i := 2; i < len(s) && i < 6; i++ { + var v byte + c := s[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + v = c - '0' + case 'a', 'b', 'c', 'd', 'e', 'f': + v = c - 'a' + 10 + case 'A', 'B', 'C', 'D', 'E', 'F': + v = c - 'A' + 10 + default: + return -1 + } + + val <<= 4 + val |= rune(v) + } + return val +} + +// processEscape processes a single escape sequence and returns number of bytes processed. +func (r *Lexer) processEscape(data []byte) (int, error) { + if len(data) < 2 { + return 0, fmt.Errorf("syntax error at %v", string(data)) + } + + c := data[1] + switch c { + case '"', '/', '\\': + r.token.byteValue = append(r.token.byteValue, c) + return 2, nil + case 'b': + r.token.byteValue = append(r.token.byteValue, '\b') + return 2, nil + case 'f': + r.token.byteValue = append(r.token.byteValue, '\f') + return 2, nil + case 'n': + r.token.byteValue = append(r.token.byteValue, '\n') + return 2, nil + case 'r': + r.token.byteValue = append(r.token.byteValue, '\r') + return 2, nil + case 't': + r.token.byteValue = append(r.token.byteValue, '\t') + return 2, nil + case 'u': + rr := getu4(data) + if rr < 0 { + return 0, errors.New("syntax error") + } + + read := 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(data[read:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + read += 6 + rr = dec + } else { + rr = unicode.ReplacementChar + } + } + var d [4]byte + s := utf8.EncodeRune(d[:], rr) + r.token.byteValue = append(r.token.byteValue, d[:s]...) + return read, nil + } + + return 0, errors.New("syntax error") +} + +// fetchString scans a string literal token. +func (r *Lexer) fetchString() { + r.pos++ + data := r.Data[r.pos:] + + hasEscapes, length := findStringLen(data) + if !hasEscapes { + r.token.byteValue = data[:length] + r.pos += length + 1 + return + } + + r.token.byteValue = make([]byte, 0, length) + p := 0 + for i := 0; i < len(data); { + switch data[i] { + case '"': + r.pos += i + 1 + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + i++ + return + + case '\\': + r.token.byteValue = append(r.token.byteValue, data[p:i]...) + off, err := r.processEscape(data[i:]) + if err != nil { + r.errParse(err.Error()) + return + } + i += off + p = i + + default: + i++ + } + } + r.errParse("unterminated string literal") +} + +// scanToken scans the next token if no token is currently available in the lexer. +func (r *Lexer) scanToken() { + if r.token.kind != tokenUndef || r.fatalError != nil { + return + } + + r.FetchToken() +} + +// consume resets the current token to allow scanning the next one. +func (r *Lexer) consume() { + r.token.kind = tokenUndef + r.token.delimValue = 0 +} + +// Ok returns true if no error (including io.EOF) was encountered during scanning. 
+func (r *Lexer) Ok() bool { + return r.fatalError == nil +} + +const maxErrorContextLen = 13 + +func (r *Lexer) errParse(what string) { + if r.fatalError == nil { + var str string + if len(r.Data)-r.pos <= maxErrorContextLen { + str = string(r.Data) + } else { + str = string(r.Data[r.pos:r.pos+maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: what, + Offset: r.pos, + Data: str, + } + } +} + +func (r *Lexer) errSyntax() { + r.errParse("syntax error") +} + +func (r *Lexer) errInvalidToken(expected string) { + if r.fatalError != nil { + return + } + if r.UseMultipleErrors { + r.pos = r.start + r.consume() + r.SkipRecursive() + switch expected { + case "[": + r.token.delimValue = ']' + r.token.kind = tokenDelim + case "{": + r.token.delimValue = '}' + r.token.kind = tokenDelim + } + r.addNonfatalError(&LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + }) + return + } + + var str string + if len(r.token.byteValue) <= maxErrorContextLen { + str = string(r.token.byteValue) + } else { + str = string(r.token.byteValue[:maxErrorContextLen-3]) + "..." + } + r.fatalError = &LexerError{ + Reason: fmt.Sprintf("expected %s", expected), + Offset: r.pos, + Data: str, + } +} + +func (r *Lexer) GetPos() int { + return r.pos +} + +// Delim consumes a token and verifies that it is the given delimiter. +func (r *Lexer) Delim(c byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() || r.token.delimValue != c { + r.consume() // errInvalidToken can change token if UseMultipleErrors is enabled. + r.errInvalidToken(string([]byte{c})) + } else { + r.consume() + } +} + +// IsDelim returns true if there was no scanning error and next token is the given delimiter. +func (r *Lexer) IsDelim(c byte) bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return !r.Ok() || r.token.delimValue == c +} + +// Null verifies that the next token is null and consumes it. +func (r *Lexer) Null() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNull { + r.errInvalidToken("null") + } + r.consume() +} + +// IsNull returns true if the next token is a null keyword. +func (r *Lexer) IsNull() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + return r.Ok() && r.token.kind == tokenNull +} + +// Skip skips a single token. +func (r *Lexer) Skip() { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + r.consume() +} + +// SkipRecursive skips next array or object completely, or just skips a single token if not +// an array/object. +// +// Note: no syntax validation is performed on the skipped data. 
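+// For example, when positioned at the opening bracket of `[1, {"a": 2}]`, SkipRecursive advances past the matching `]`.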
+func (r *Lexer) SkipRecursive() { + r.scanToken() + var start, end byte + + if r.token.delimValue == '{' { + start, end = '{', '}' + } else if r.token.delimValue == '[' { + start, end = '[', ']' + } else { + r.consume() + return + } + + r.consume() + + level := 1 + inQuotes := false + wasEscape := false + + for i, c := range r.Data[r.pos:] { + switch { + case c == start && !inQuotes: + level++ + case c == end && !inQuotes: + level-- + if level == 0 { + r.pos += i + 1 + return + } + case c == '\\' && inQuotes: + wasEscape = !wasEscape + continue + case c == '"' && inQuotes: + inQuotes = wasEscape + case c == '"': + inQuotes = true + } + wasEscape = false + } + r.pos = len(r.Data) + r.fatalError = &LexerError{ + Reason: "EOF reached while skipping array/object or token", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + } +} + +// Raw fetches the next item recursively as a data slice +func (r *Lexer) Raw() []byte { + r.SkipRecursive() + if !r.Ok() { + return nil + } + return r.Data[r.start:r.pos] +} + +// IsStart returns whether the lexer is positioned at the start +// of an input string. +func (r *Lexer) IsStart() bool { + return r.pos == 0 +} + +// Consumed reads all remaining bytes from the input, publishing an error if +// there is anything but whitespace remaining. +func (r *Lexer) Consumed() { + if r.pos > len(r.Data) || !r.Ok() { + return + } + + for _, c := range r.Data[r.pos:] { + if c != ' ' && c != '\t' && c != '\r' && c != '\n' { + r.AddError(&LexerError{ + Reason: "invalid character '" + string(c) + "' after top-level value", + Offset: r.pos, + Data: string(r.Data[r.pos:]), + }) + return + } + + r.pos++ + r.start++ + } +} + +func (r *Lexer) unsafeString() (string, []byte) { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "", nil + } + bytes := r.token.byteValue + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret, bytes +} + +// UnsafeString returns the string value if the token is a string literal. +// +// Warning: returned string may point to the input buffer, so the string should not outlive +// the input buffer. Intended pattern of usage is as an argument to a switch statement. +func (r *Lexer) UnsafeString() string { + ret, _ := r.unsafeString() + return ret +} + +// UnsafeBytes returns the byte slice if the token is a string literal. +func (r *Lexer) UnsafeBytes() []byte { + _, ret := r.unsafeString() + return ret +} + +// String reads a string literal. +func (r *Lexer) String() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return "" + } + ret := string(r.token.byteValue) + r.consume() + return ret +} + +// Bytes reads a string literal and base64 decodes it into a byte slice. +func (r *Lexer) Bytes() []byte { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenString { + r.errInvalidToken("string") + return nil + } + ret := make([]byte, base64.StdEncoding.DecodedLen(len(r.token.byteValue))) + len, err := base64.StdEncoding.Decode(ret, r.token.byteValue) + if err != nil { + r.fatalError = &LexerError{ + Reason: err.Error(), + } + return nil + } + + r.consume() + return ret[:len] +} + +// Bool reads a true or false boolean keyword. 
+func (r *Lexer) Bool() bool { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenBool { + r.errInvalidToken("bool") + return false + } + ret := r.token.boolValue + r.consume() + return ret +} + +func (r *Lexer) number() string { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + if !r.Ok() || r.token.kind != tokenNumber { + r.errInvalidToken("number") + return "" + } + ret := bytesToStr(r.token.byteValue) + r.consume() + return ret +} + +func (r *Lexer) Uint8() uint8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16() uint16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32() uint32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return uint32(n) +} + +func (r *Lexer) Uint64() uint64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Uint() uint { + return uint(r.Uint64()) +} + +func (r *Lexer) Int8() int8 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int8(n) +} + +func (r *Lexer) Int16() int16 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int16(n) +} + +func (r *Lexer) Int32() int32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return int32(n) +} + +func (r *Lexer) Int64() int64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Int() int { + return int(r.Int64()) +} + +func (r *Lexer) Uint8Str() uint8 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint8(n) +} + +func (r *Lexer) Uint16Str() uint16 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint16(n) +} + +func (r *Lexer) Uint32Str() uint32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return uint32(n) +} + 
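+// Uint64Str reads a uint64 that is encoded as a JSON string, e.g. `"123"`; parse failures are reported through addNonfatalError.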
+func (r *Lexer) Uint64Str() uint64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) UintStr() uint { + return uint(r.Uint64Str()) +} + +func (r *Lexer) Int8Str() int8 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 8) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int8(n) +} + +func (r *Lexer) Int16Str() int16 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 16) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int16(n) +} + +func (r *Lexer) Int32Str() int32 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return int32(n) +} + +func (r *Lexer) Int64Str() int64 { + s, b := r.unsafeString() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseInt(s, 10, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: string(b), + }) + } + return n +} + +func (r *Lexer) IntStr() int { + return int(r.Int64Str()) +} + +func (r *Lexer) Float32() float32 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 32) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return float32(n) +} + +func (r *Lexer) Float64() float64 { + s := r.number() + if !r.Ok() { + return 0 + } + + n, err := strconv.ParseFloat(s, 64) + if err != nil { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Reason: err.Error(), + Data: s, + }) + } + return n +} + +func (r *Lexer) Error() error { + return r.fatalError +} + +func (r *Lexer) AddError(e error) { + if r.fatalError == nil { + r.fatalError = e + } +} + +func (r *Lexer) AddNonFatalError(e error) { + r.addNonfatalError(&LexerError{ + Offset: r.start, + Data: string(r.Data[r.start:r.pos]), + Reason: e.Error(), + }) +} + +func (r *Lexer) addNonfatalError(err *LexerError) { + if r.UseMultipleErrors { + // We don't want to add errors with the same offset. + if len(r.multipleErrors) != 0 && r.multipleErrors[len(r.multipleErrors)-1].Offset == err.Offset { + return + } + r.multipleErrors = append(r.multipleErrors, err) + return + } + r.fatalError = err +} + +func (r *Lexer) GetNonFatalErrors() []*LexerError { + return r.multipleErrors +} + +// Interface fetches an interface{} analogous to the 'encoding/json' package. 
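+// Objects are returned as map[string]interface{}, arrays as []interface{}, numbers as float64, booleans as bool, and null as nil.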
+func (r *Lexer) Interface() interface{} { + if r.token.kind == tokenUndef && r.Ok() { + r.FetchToken() + } + + if !r.Ok() { + return nil + } + switch r.token.kind { + case tokenString: + return r.String() + case tokenNumber: + return r.Float64() + case tokenBool: + return r.Bool() + case tokenNull: + r.Null() + return nil + } + + if r.token.delimValue == '{' { + r.consume() + + ret := map[string]interface{}{} + for !r.IsDelim('}') { + key := r.String() + r.WantColon() + ret[key] = r.Interface() + r.WantComma() + } + r.Delim('}') + + if r.Ok() { + return ret + } else { + return nil + } + } else if r.token.delimValue == '[' { + r.consume() + + var ret []interface{} + for !r.IsDelim(']') { + ret = append(ret, r.Interface()) + r.WantComma() + } + r.Delim(']') + + if r.Ok() { + return ret + } else { + return nil + } + } + r.errSyntax() + return nil +} + +// WantComma requires a comma to be present before fetching next token. +func (r *Lexer) WantComma() { + r.wantSep = ',' + r.firstElement = false +} + +// WantColon requires a colon to be present before fetching next token. +func (r *Lexer) WantColon() { + r.wantSep = ':' + r.firstElement = false +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jwriter/writer.go b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jwriter/writer.go new file mode 100644 index 00000000..7b55293a --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/jwriter/writer.go @@ -0,0 +1,328 @@ +// Package jwriter contains a JSON writer. +package jwriter + +import ( + "encoding/base64" + "io" + "strconv" + "unicode/utf8" + + "github.com/mailru/easyjson/buffer" +) + +// Flags describe various encoding options. The behavior may be actually implemented in the encoder, but +// Flags field in Writer is used to set and pass them around. +type Flags int + +const ( + NilMapAsEmpty Flags = 1 << iota // Encode nil map as '{}' rather than 'null'. + NilSliceAsEmpty // Encode nil slice as '[]' rather than 'null'. +) + +// Writer is a JSON writer. +type Writer struct { + Flags Flags + + Error error + Buffer buffer.Buffer + NoEscapeHTML bool +} + +// Size returns the size of the data that was written out. +func (w *Writer) Size() int { + return w.Buffer.Size() +} + +// DumpTo outputs the data to given io.Writer, resetting the buffer. +func (w *Writer) DumpTo(out io.Writer) (written int, err error) { + return w.Buffer.DumpTo(out) +} + +// BuildBytes returns writer data as a single byte slice. You can optionally provide one byte slice +// as argument that it will try to reuse. +func (w *Writer) BuildBytes(reuse ...[]byte) ([]byte, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.BuildBytes(reuse...), nil +} + +// ReadCloser returns an io.ReadCloser that can be used to read the data. +// ReadCloser also resets the buffer. +func (w *Writer) ReadCloser() (io.ReadCloser, error) { + if w.Error != nil { + return nil, w.Error + } + + return w.Buffer.ReadCloser(), nil +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawByte(c byte) { + w.Buffer.AppendByte(c) +} + +// RawByte appends raw binary data to the buffer. +func (w *Writer) RawString(s string) { + w.Buffer.AppendString(s) +} + +// Raw appends raw binary data to the buffer or sets the error if it is given. Useful for +// calling with results of MarshalJSON-like functions. 
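+// If err is nil and data is empty, `null` is written instead.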
+func (w *Writer) Raw(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.Buffer.AppendBytes(data) + default: + w.RawString("null") + } +} + +// RawText encloses raw binary data in quotes and appends in to the buffer. +// Useful for calling with results of MarshalText-like functions. +func (w *Writer) RawText(data []byte, err error) { + switch { + case w.Error != nil: + return + case err != nil: + w.Error = err + case len(data) > 0: + w.String(string(data)) + default: + w.RawString("null") + } +} + +// Base64Bytes appends data to the buffer after base64 encoding it +func (w *Writer) Base64Bytes(data []byte) { + if data == nil { + w.Buffer.AppendString("null") + return + } + w.Buffer.AppendByte('"') + dst := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(dst, data) + w.Buffer.AppendBytes(dst) + w.Buffer.AppendByte('"') +} + +func (w *Writer) Uint8(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint16(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint32(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) +} + +func (w *Writer) Uint64(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Int8(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int16(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int32(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) +} + +func (w *Writer) Int64(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) +} + +func (w *Writer) Uint8Str(n uint8) { + w.Buffer.EnsureSpace(3) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint16Str(n uint16) { + w.Buffer.EnsureSpace(5) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint32Str(n uint32) { + w.Buffer.EnsureSpace(10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) UintStr(n uint) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, uint64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Uint64Str(n uint64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendUint(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int8Str(n int8) { + w.Buffer.EnsureSpace(4) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + 
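+// Int16Str writes n as a decimal value wrapped in double quotes, e.g. `"-12"`, mirroring the jlexer Int16Str reader.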
+func (w *Writer) Int16Str(n int16) { + w.Buffer.EnsureSpace(6) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int32Str(n int32) { + w.Buffer.EnsureSpace(11) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) IntStr(n int) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, int64(n), 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Int64Str(n int64) { + w.Buffer.EnsureSpace(21) + w.Buffer.Buf = append(w.Buffer.Buf, '"') + w.Buffer.Buf = strconv.AppendInt(w.Buffer.Buf, n, 10) + w.Buffer.Buf = append(w.Buffer.Buf, '"') +} + +func (w *Writer) Float32(n float32) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, float64(n), 'g', -1, 32) +} + +func (w *Writer) Float64(n float64) { + w.Buffer.EnsureSpace(20) + w.Buffer.Buf = strconv.AppendFloat(w.Buffer.Buf, n, 'g', -1, 64) +} + +func (w *Writer) Bool(v bool) { + w.Buffer.EnsureSpace(5) + if v { + w.Buffer.Buf = append(w.Buffer.Buf, "true"...) + } else { + w.Buffer.Buf = append(w.Buffer.Buf, "false"...) + } +} + +const chars = "0123456789abcdef" + +func isNotEscapedSingleChar(c byte, escapeHTML bool) bool { + // Note: might make sense to use a table if there are more chars to escape. With 4 chars + // it benchmarks the same. + if escapeHTML { + return c != '<' && c != '>' && c != '&' && c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } else { + return c != '\\' && c != '"' && c >= 0x20 && c < utf8.RuneSelf + } +} + +func (w *Writer) String(s string) { + w.Buffer.AppendByte('"') + + // Portions of the string that contain no escapes are appended as + // byte slices. 
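+	// Characters that need escaping, invalid UTF-8 bytes, and the JSONP-unsafe U+2028/U+2029 separators are emitted as escape sequences.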
+ + p := 0 // last non-escape symbol + + for i := 0; i < len(s); { + c := s[i] + + if isNotEscapedSingleChar(c, !w.NoEscapeHTML) { + // single-width character, no escaping is required + i++ + continue + } else if c < utf8.RuneSelf { + // single-with character, need to escape + w.Buffer.AppendString(s[p:i]) + switch c { + case '\t': + w.Buffer.AppendString(`\t`) + case '\r': + w.Buffer.AppendString(`\r`) + case '\n': + w.Buffer.AppendString(`\n`) + case '\\': + w.Buffer.AppendString(`\\`) + case '"': + w.Buffer.AppendString(`\"`) + default: + w.Buffer.AppendString(`\u00`) + w.Buffer.AppendByte(chars[c>>4]) + w.Buffer.AppendByte(chars[c&0xf]) + } + + i++ + p = i + continue + } + + // broken utf + runeValue, runeWidth := utf8.DecodeRuneInString(s[i:]) + if runeValue == utf8.RuneError && runeWidth == 1 { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\ufffd`) + i++ + p = i + continue + } + + // jsonp stuff - tab separator and line separator + if runeValue == '\u2028' || runeValue == '\u2029' { + w.Buffer.AppendString(s[p:i]) + w.Buffer.AppendString(`\u202`) + w.Buffer.AppendByte(chars[runeValue&0xf]) + i += runeWidth + p = i + continue + } + i += runeWidth + } + w.Buffer.AppendString(s[p:]) + w.Buffer.AppendByte('"') +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/raw.go b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/raw.go new file mode 100644 index 00000000..81bd002e --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/mailru/easyjson/raw.go @@ -0,0 +1,45 @@ +package easyjson + +import ( + "github.com/mailru/easyjson/jlexer" + "github.com/mailru/easyjson/jwriter" +) + +// RawMessage is a raw piece of JSON (number, string, bool, object, array or +// null) that is extracted without parsing and output as is during marshaling. +type RawMessage []byte + +// MarshalEasyJSON does JSON marshaling using easyjson interface. +func (v *RawMessage) MarshalEasyJSON(w *jwriter.Writer) { + if len(*v) == 0 { + w.RawString("null") + } else { + w.Raw(*v, nil) + } +} + +// UnmarshalEasyJSON does JSON unmarshaling using easyjson interface. +func (v *RawMessage) UnmarshalEasyJSON(l *jlexer.Lexer) { + *v = RawMessage(l.Raw()) +} + +// UnmarshalJSON implements encoding/json.Unmarshaler interface. +func (v *RawMessage) UnmarshalJSON(data []byte) error { + *v = data + return nil +} + +var nullBytes = []byte("null") + +// MarshalJSON implements encoding/json.Marshaler interface. +func (v RawMessage) MarshalJSON() ([]byte, error) { + if len(v) == 0 { + return nullBytes, nil + } + return v, nil +} + +// IsDefined is required for integration with omitempty easyjson logic. +func (v *RawMessage) IsDefined() bool { + return len(*v) > 0 +} diff --git a/src/stackdriver-nozzle/vendor/github.com/mitchellh/hashstructure/README.md b/src/stackdriver-nozzle/vendor/github.com/mitchellh/hashstructure/README.md index 619a7faf..28ce45a3 100644 --- a/src/stackdriver-nozzle/vendor/github.com/mitchellh/hashstructure/README.md +++ b/src/stackdriver-nozzle/vendor/github.com/mitchellh/hashstructure/README.md @@ -19,6 +19,9 @@ sending data across the network, caching values locally (de-dup), and so on. * Optionally specify a custom hash function to optimize for speed, collision avoidance for your data set, etc. 
+ + * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, + allowing effective hashing of time.Time ## Installation diff --git a/src/stackdriver-nozzle/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/src/stackdriver-nozzle/vendor/github.com/mitchellh/hashstructure/hashstructure.go index 1800f435..ea13a158 100644 --- a/src/stackdriver-nozzle/vendor/github.com/mitchellh/hashstructure/hashstructure.go +++ b/src/stackdriver-nozzle/vendor/github.com/mitchellh/hashstructure/hashstructure.go @@ -8,6 +8,16 @@ import ( "reflect" ) +// ErrNotStringer is returned when there's an error with hash:"string" +type ErrNotStringer struct { + Field string +} + +// Error implements error for ErrNotStringer +func (ens *ErrNotStringer) Error() string { + return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) +} + // HashOptions are options that are available for hashing. type HashOptions struct { // Hasher is the hash function to use. If this isn't set, it will @@ -27,8 +37,8 @@ type HashOptions struct { // // If opts is nil, then default options will be used. See HashOptions // for the default values. The same *HashOptions value cannot be used -// concurrently. None of the values within a *HashOptions struct are -// safe to read/write while hashing is being done. +// concurrently. None of the values within a *HashOptions struct are +// safe to read/write while hashing is being done. // // Notes on the value: // @@ -52,6 +62,9 @@ type HashOptions struct { // * "set" - The field will be treated as a set, where ordering doesn't // affect the hash code. This only works for slices. // +// * "string" - The field will be hashed as a string, only works when the +// field implements fmt.Stringer +// func Hash(v interface{}, opts *HashOptions) (uint64, error) { // Create default options if opts == nil { @@ -201,8 +214,8 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { return h, nil case reflect.Struct: - var include Includable parent := v.Interface() + var include Includable if impl, ok := parent.(Includable); ok { include = impl } @@ -215,7 +228,7 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { l := v.NumField() for i := 0; i < l; i++ { - if v := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { + if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" { var f visitFlag fieldType := t.Field(i) if fieldType.PkgPath != "" { @@ -229,9 +242,20 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { continue } + // if string is set, use the string value + if tag == "string" { + if impl, ok := innerV.Interface().(fmt.Stringer); ok { + innerV = reflect.ValueOf(impl.String()) + } else { + return 0, &ErrNotStringer{ + Field: v.Type().Field(i).Name, + } + } + } + // Check if we implement includable and check it if include != nil { - incl, err := include.HashInclude(fieldType.Name, v) + incl, err := include.HashInclude(fieldType.Name, innerV) if err != nil { return 0, err } @@ -250,7 +274,7 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { return 0, err } - vh, err := w.visit(v, &visitOpts{ + vh, err := w.visit(innerV, &visitOpts{ Flags: f, Struct: parent, StructField: fieldType.Name, @@ -301,7 +325,6 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { return 0, fmt.Errorf("unknown kind to hash: %s", k) } - return 0, nil } func hashUpdateOrdered(h hash.Hash64, 
a, b uint64) uint64 { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/CHANGELOG.md b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/CHANGELOG.md index 5bc46f91..6a4ab3f1 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/CHANGELOG.md +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/CHANGELOG.md @@ -1,10 +1,29 @@ ## HEAD +- When using custom reporters register the custom reporters *before* the default reporter. This allows users to see the output of any print statements in their custom reporters. [#365] + +## 1.4.0 7/16/2017 + +- `ginkgo` now provides a hint if you accidentally forget to run `ginkgo bootstrap` to generate a `*_suite_test.go` file that actually invokes the Ginkgo test runner. [#345](https://github.com/onsi/ginkgo/pull/345) +- Thanks to improvements in `go test -c`, `ginkgo` no longer needs to fix Go's compilation output to ensure compilation errors are expressed relative to the CWD. [#357] +- `ginkgo watch -watchRegExp=...` allows you to specify a custom regular expression to watch. Only files matching the regular expression are watched for changes (the default is `\.go$`). [#356] +- `ginkgo` now always emits compilation output. Previously, only failed compilation output was printed out. [#277] +- `ginkgo -requireSuite` now fails the test run if there are `*_test.go` files but `go test` fails to detect any tests. Typically this means you forgot to run `ginkgo bootstrap` to generate a suite file. [#344] +- `ginkgo -timeout=DURATION` allows you to adjust the timeout for the entire test suite (default is 24 hours). [#248] + +## 1.3.0 3/28/2017 + Improvements: +- Significantly improved parallel test distribution. Now, instead of pre-sharding test cases across workers (which can result in idle workers and poor test performance), Ginkgo uses a shared queue to keep all workers busy until all tests are complete. This improves test-time performance and consistency. - `Skip(message)` can be used to skip the current test. - Added `extensions/table` - a Ginkgo DSL for [Table Driven Tests](http://onsi.github.io/ginkgo/#table-driven-tests) - Add `GinkgoRandomSeed()` - shorthand for `config.GinkgoConfig.RandomSeed` +- Support for retrying flaky tests with `--flakeAttempts` +- `ginkgo ./...` now recurses as you'd expect +- Added `Specify`, a synonym for `It` +- Support colorised output on Windows +- Broader support for various go compilation flags in the `ginkgo` CLI Bug Fixes: diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md new file mode 100644 index 00000000..bc0c54fe --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/CONTRIBUTING.md @@ -0,0 +1,12 @@ +# Contributing to Ginkgo + +Your contributions to Ginkgo are essential for its long-term maintenance and improvement. To make a contribution: + +- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code! +- Ensure adequate test coverage: + - If you're adding functionality to the Ginkgo library, make sure to add appropriate unit and/or integration tests (under the `integration` folder). + - If you're adding functionality to the Ginkgo CLI, note that there are very few unit tests. Please add an integration test.
+ - Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR. +- Update the documentation. In addition to standard `godoc` comments, Ginkgo has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR. + +Thanks for supporting Ginkgo! \ No newline at end of file diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/README.md b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/README.md index b8b77b57..97e9cdc4 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/README.md +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/README.md @@ -1,10 +1,10 @@ ![Ginkgo: A Golang BDD Testing Framework](http://onsi.github.io/ginkgo/images/ginkgo.png) -[![Build Status](https://travis-ci.org/onsi/ginkgo.png)](https://travis-ci.org/onsi/ginkgo) +[![Build Status](https://travis-ci.org/onsi/ginkgo.svg)](https://travis-ci.org/onsi/ginkgo) Jump to the [docs](http://onsi.github.io/ginkgo/) to learn more. To start rolling your Ginkgo tests *now* [keep reading](#set-me-up)! -To discuss Ginkgo and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega). +If you have a question, comment, bug report, feature request, etc., please open a GitHub issue. ## Feature List @@ -43,6 +43,8 @@ To discuss Ginkgo and get updates, join the [google group](https://groups.google - [Completions for Sublime Text](https://github.com/onsi/ginkgo-sublime-completions): just use [Package Control](https://sublime.wbond.net/) to install `Ginkgo Completions`. +- [Completions for VSCode](https://github.com/onsi/vscode-ginkgo): just use VSCode's extension installer to install `vscode-ginkgo`. + - Straightforward support for third-party testing libraries such as [Gomock](https://code.google.com/p/gomock/) and [Testify](https://github.com/stretchr/testify). Check out the [docs](http://onsi.github.io/ginkgo/#third-party-integrations) for details. - A modular architecture that lets you easily: @@ -113,3 +115,9 @@ Go explore! ## License Ginkgo is MIT-Licensed + +## Contributing + +Since Ginkgo's tests also cover its internal packages, when you fork you'll have to rewrite the internal package imports to point at your repository.
+Use `before_pr.sh` for that.
+After you have finished your changes, and before you push your pull request, use `after_pr.sh` to revert the import rewrites. \ No newline at end of file diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/before_pr.sh b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/before_pr.sh new file mode 100755 index 00000000..3cf262f3 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/before_pr.sh @@ -0,0 +1,13 @@ +# Take current path +path=$(pwd) + +# Split it +IFS='\/'; arrIN=($path); unset IFS; + +# Find directory before ginkgo +len=${#arrIN[@]} + +userDir=${arrIN[$len-2]} + +# Replace onsi with userdir +find . -type f -name '*.go' -exec sed -i '' s/github.com\\/onsi\\/ginkgo\\/internal/github.com\\/$userDir\\/ginkgo\\/internal/ {} + \ No newline at end of file diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/config/config.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/config/config.go index 4b5485b4..60d5ea22 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/config/config.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/config/config.go @@ -20,7 +20,7 @@ import ( "fmt" ) -const VERSION = "1.2.0" +const VERSION = "1.4.0" type GinkgoConfigType struct { RandomSeed int64 diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go index 8fe0b70a..8befd35a 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/ginkgo_dsl.go @@ -202,7 +202,7 @@ func RunSpecs(t GinkgoTestingT, description string) bool { //To run your tests with Ginkgo's default reporter and your custom reporter(s), replace //RunSpecs() with this method. func RunSpecsWithDefaultAndCustomReporters(t GinkgoTestingT, description string, specReporters []Reporter) bool { - specReporters = append([]Reporter{buildDefaultReporter()}, specReporters...)
+ specReporters = append(specReporters, buildDefaultReporter()) return RunSpecsWithCustomReporters(t, description, specReporters) } @@ -216,7 +216,7 @@ func RunSpecsWithCustomReporters(t GinkgoTestingT, description string, specRepor reporters[i] = reporter } passed, hasFocusedTests := globalSuite.Run(t, description, reporters, writer, config.GinkgoConfig) - if passed && hasFocusedTests { + if passed && hasFocusedTests && strings.TrimSpace(os.Getenv("GINKGO_EDITOR_INTEGRATION")) == "" { fmt.Println("PASS | FOCUSED") os.Exit(types.GINKGO_FOCUS_EXIT_CODE) } diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go index 1e34dbf6..522d44e3 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/remote/aggregator.go @@ -125,14 +125,12 @@ func (aggregator *Aggregator) registerSuiteBeginning(configAndSuite configAndSui aggregator.stenographer.AnnounceSuite(configAndSuite.summary.SuiteDescription, configAndSuite.config.RandomSeed, configAndSuite.config.RandomizeAllSpecs, aggregator.config.Succinct) - numberOfSpecsToRun := 0 totalNumberOfSpecs := 0 - for _, configAndSuite := range aggregator.aggregatedSuiteBeginnings { - numberOfSpecsToRun += configAndSuite.summary.NumberOfSpecsThatWillBeRun - totalNumberOfSpecs += configAndSuite.summary.NumberOfTotalSpecs + if len(aggregator.aggregatedSuiteBeginnings) > 0 { + totalNumberOfSpecs = configAndSuite.summary.NumberOfSpecsBeforeParallelization } - aggregator.stenographer.AnnounceNumberOfSpecs(numberOfSpecsToRun, totalNumberOfSpecs, aggregator.config.Succinct) + aggregator.stenographer.AnnounceTotalNumberOfSpecs(totalNumberOfSpecs, aggregator.config.Succinct) aggregator.stenographer.AnnounceAggregatedParallelRun(aggregator.nodeCount, aggregator.config.Succinct) aggregator.flushCompletedSpecs() } @@ -239,6 +237,7 @@ func (aggregator *Aggregator) registerSuiteEnding(suite *types.SuiteSummary) (fi aggregatedSuiteSummary.NumberOfFailedSpecs += suiteSummary.NumberOfFailedSpecs aggregatedSuiteSummary.NumberOfPendingSpecs += suiteSummary.NumberOfPendingSpecs aggregatedSuiteSummary.NumberOfSkippedSpecs += suiteSummary.NumberOfSkippedSpecs + aggregatedSuiteSummary.NumberOfFlakedSpecs += suiteSummary.NumberOfFlakedSpecs } aggregatedSuiteSummary.RunTime = time.Since(aggregator.startTime) diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/remote/server.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/remote/server.go index b55c681b..297af2eb 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/remote/server.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/remote/server.go @@ -9,13 +9,16 @@ package remote import ( "encoding/json" - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/reporters" - "github.com/onsi/ginkgo/types" "io/ioutil" "net" "net/http" "sync" + + "github.com/onsi/ginkgo/internal/spec_iterator" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/reporters" + "github.com/onsi/ginkgo/types" ) /* @@ -29,6 +32,7 @@ type Server struct { lock *sync.Mutex beforeSuiteData types.RemoteBeforeSuiteData parallelTotal int + counter int } //Create a new server, automatically selecting a port @@ -63,6 +67,8 @@ func 
(server *Server) Start() { //synchronization endpoints mux.HandleFunc("/BeforeSuiteState", server.handleBeforeSuiteState) mux.HandleFunc("/RemoteAfterSuiteData", server.handleRemoteAfterSuiteData) + mux.HandleFunc("/counter", server.handleCounter) + mux.HandleFunc("/has-counter", server.handleHasCounter) //for backward compatibility go httpServer.Serve(server.listener) } @@ -202,3 +208,17 @@ func (server *Server) handleRemoteAfterSuiteData(writer http.ResponseWriter, req enc := json.NewEncoder(writer) enc.Encode(afterSuiteData) } + +func (server *Server) handleCounter(writer http.ResponseWriter, request *http.Request) { + c := spec_iterator.Counter{} + server.lock.Lock() + c.Index = server.counter + server.counter = server.counter + 1 + server.lock.Unlock() + + json.NewEncoder(writer).Encode(c) +} + +func (server *Server) handleHasCounter(writer http.ResponseWriter, request *http.Request) { + writer.Write([]byte("")) +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec/specs.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec/specs.go index 6f4fbd36..006185ab 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec/specs.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec/specs.go @@ -7,16 +7,14 @@ import ( ) type Specs struct { - specs []*Spec - numberOfOriginalSpecs int - hasProgrammaticFocus bool - RegexScansFilePath bool + specs []*Spec + hasProgrammaticFocus bool + RegexScansFilePath bool } func NewSpecs(specs []*Spec) *Specs { return &Specs{ specs: specs, - numberOfOriginalSpecs: len(specs), } } @@ -24,10 +22,6 @@ func (e *Specs) Specs() []*Spec { return e.specs } -func (e *Specs) NumberOfOriginalSpecs() int { - return e.numberOfOriginalSpecs -} - func (e *Specs) HasProgrammaticFocus() bool { return e.hasProgrammaticFocus } @@ -114,15 +108,6 @@ func (e *Specs) SkipMeasurements() { } } -func (e *Specs) TrimForParallelization(total int, node int) { - startIndex, count := ParallelizedIndexRange(len(e.specs), total, node) - if count == 0 { - e.specs = make([]*Spec, 0) - } else { - e.specs = e.specs[startIndex : startIndex+count] - } -} - //sort.Interface func (e *Specs) Len() int { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go similarity index 98% rename from src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go rename to src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go index 5a67fc7b..82272554 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec/index_computer.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/index_computer.go @@ -1,4 +1,4 @@ -package spec +package spec_iterator func ParallelizedIndexRange(length int, parallelTotal int, parallelNode int) (startIndex int, count int) { if length == 0 { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go new file mode 100644 index 00000000..54e61ecb --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/parallel_spec_iterator.go @@ -0,0 +1,60 @@ +package 
spec_iterator + +import ( + "encoding/json" + "errors" + "fmt" + "net/http" + + "github.com/onsi/ginkgo/internal/spec" +) + +type ParallelIterator struct { + specs []*spec.Spec + host string + client *http.Client +} + +func NewParallelIterator(specs []*spec.Spec, host string) *ParallelIterator { + return &ParallelIterator{ + specs: specs, + host: host, + client: &http.Client{}, + } +} + +func (s *ParallelIterator) Next() (*spec.Spec, error) { + resp, err := s.client.Get(s.host + "/counter") + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return nil, errors.New(fmt.Sprintf("unexpected status code %d", resp.StatusCode)) + } + + var counter Counter + err = json.NewDecoder(resp.Body).Decode(&counter) + if err != nil { + return nil, err + } + + if counter.Index >= len(s.specs) { + return nil, ErrClosed + } + + return s.specs[counter.Index], nil +} + +func (s *ParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return -1, false +} + +func (s *ParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + return -1, false +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go new file mode 100644 index 00000000..a51c93b8 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/serial_spec_iterator.go @@ -0,0 +1,45 @@ +package spec_iterator + +import ( + "github.com/onsi/ginkgo/internal/spec" +) + +type SerialIterator struct { + specs []*spec.Spec + index int +} + +func NewSerialIterator(specs []*spec.Spec) *SerialIterator { + return &SerialIterator{ + specs: specs, + index: 0, + } +} + +func (s *SerialIterator) Next() (*spec.Spec, error) { + if s.index >= len(s.specs) { + return nil, ErrClosed + } + + spec := s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *SerialIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *SerialIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return len(s.specs), true +} + +func (s *SerialIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for _, s := range s.specs { + if !s.Skipped() && !s.Pending() { + count += 1 + } + } + return count, true +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go new file mode 100644 index 00000000..ad4a3ea3 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/sharded_parallel_spec_iterator.go @@ -0,0 +1,47 @@ +package spec_iterator + +import "github.com/onsi/ginkgo/internal/spec" + +type ShardedParallelIterator struct { + specs []*spec.Spec + index int + maxIndex int +} + +func NewShardedParallelIterator(specs []*spec.Spec, total int, node int) *ShardedParallelIterator { + startIndex, count := ParallelizedIndexRange(len(specs), total, node) + + return &ShardedParallelIterator{ + specs: specs, + index: startIndex, + maxIndex: startIndex + count, + } +} + +func (s *ShardedParallelIterator) Next() (*spec.Spec, error) { + if s.index >= s.maxIndex { + return nil, ErrClosed + } + + spec := 
s.specs[s.index] + s.index += 1 + return spec, nil +} + +func (s *ShardedParallelIterator) NumberOfSpecsPriorToIteration() int { + return len(s.specs) +} + +func (s *ShardedParallelIterator) NumberOfSpecsToProcessIfKnown() (int, bool) { + return s.maxIndex - s.index, true +} + +func (s *ShardedParallelIterator) NumberOfSpecsThatWillBeRunIfKnown() (int, bool) { + count := 0 + for i := s.index; i < s.maxIndex; i += 1 { + if !s.specs[i].Skipped() && !s.specs[i].Pending() { + count += 1 + } + } + return count, true +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go new file mode 100644 index 00000000..74bffad6 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/spec_iterator/spec_iterator.go @@ -0,0 +1,20 @@ +package spec_iterator + +import ( + "errors" + + "github.com/onsi/ginkgo/internal/spec" +) + +var ErrClosed = errors.New("no more specs to run") + +type SpecIterator interface { + Next() (*spec.Spec, error) + NumberOfSpecsPriorToIteration() int + NumberOfSpecsToProcessIfKnown() (int, bool) + NumberOfSpecsThatWillBeRunIfKnown() (int, bool) +} + +type Counter struct { + Index int `json:"index"` +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go index 869aaeca..d4dd909e 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/specrunner/spec_runner.go @@ -7,6 +7,8 @@ import ( "sync" "syscall" + "github.com/onsi/ginkgo/internal/spec_iterator" + "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/internal/leafnodes" "github.com/onsi/ginkgo/internal/spec" @@ -20,7 +22,7 @@ import ( type SpecRunner struct { description string beforeSuiteNode leafnodes.SuiteNode - specs *spec.Specs + iterator spec_iterator.SpecIterator afterSuiteNode leafnodes.SuiteNode reporters []reporters.Reporter startTime time.Time @@ -29,14 +31,15 @@ type SpecRunner struct { writer Writer.WriterInterface config config.GinkgoConfigType interrupted bool + processedSpecs []*spec.Spec lock *sync.Mutex } -func New(description string, beforeSuiteNode leafnodes.SuiteNode, specs *spec.Specs, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { +func New(description string, beforeSuiteNode leafnodes.SuiteNode, iterator spec_iterator.SpecIterator, afterSuiteNode leafnodes.SuiteNode, reporters []reporters.Reporter, writer Writer.WriterInterface, config config.GinkgoConfigType) *SpecRunner { return &SpecRunner{ description: description, beforeSuiteNode: beforeSuiteNode, - specs: specs, + iterator: iterator, afterSuiteNode: afterSuiteNode, reporters: reporters, writer: writer, @@ -79,7 +82,18 @@ func (runner *SpecRunner) performDryRun() { runner.reportBeforeSuite(summary) } - for _, spec := range runner.specs.Specs() { + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + summary := spec.Summary(runner.suiteID) 
runner.reportSpecWillRun(summary) if summary.State == types.SpecStateInvalid { @@ -130,9 +144,21 @@ func (runner *SpecRunner) runAfterSuite() bool { func (runner *SpecRunner) runSpecs() bool { suiteFailed := false skipRemainingSpecs := false - for _, spec := range runner.specs.Specs() { + for { + spec, err := runner.iterator.Next() + if err == spec_iterator.ErrClosed { + break + } + if err != nil { + fmt.Println("failed to iterate over tests:\n" + err.Error()) + suiteFailed = true + break + } + + runner.processedSpecs = append(runner.processedSpecs, spec) + if runner.wasInterrupted() { - return suiteFailed + break } if skipRemainingSpecs { spec.Skip() @@ -244,7 +270,7 @@ func (runner *SpecRunner) wasInterrupted() bool { func (runner *SpecRunner) reportSuiteWillBegin() { runner.startTime = time.Now() - summary := runner.summary(true) + summary := runner.suiteWillBeginSummary() for _, reporter := range runner.reporters { reporter.SpecSuiteWillBegin(runner.config, summary) } @@ -271,6 +297,9 @@ func (runner *SpecRunner) reportSpecWillRun(summary *types.SpecSummary) { } func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, failed bool) { + if failed && len(summary.CapturedOutput) == 0 { + summary.CapturedOutput = string(runner.writer.Bytes()) + } for i := len(runner.reporters) - 1; i >= 1; i-- { runner.reporters[i].SpecDidComplete(summary) } @@ -283,17 +312,17 @@ func (runner *SpecRunner) reportSpecDidComplete(summary *types.SpecSummary, fail } func (runner *SpecRunner) reportSuiteDidEnd(success bool) { - summary := runner.summary(success) + summary := runner.suiteDidEndSummary(success) summary.RunTime = time.Since(runner.startTime) for _, reporter := range runner.reporters { reporter.SpecSuiteDidEnd(summary) } } -func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) (count int) { +func (runner *SpecRunner) countSpecsThatRanSatisfying(filter func(ex *spec.Spec) bool) (count int) { count = 0 - for _, spec := range runner.specs.Specs() { + for _, spec := range runner.processedSpecs { if filter(spec) { count++ } @@ -302,32 +331,37 @@ func (runner *SpecRunner) countSpecsSatisfying(filter func(ex *spec.Spec) bool) return count } -func (runner *SpecRunner) summary(success bool) *types.SuiteSummary { - numberOfSpecsThatWillBeRun := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { +func (runner *SpecRunner) suiteDidEndSummary(success bool) *types.SuiteSummary { + numberOfSpecsThatWillBeRun := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { return !ex.Skipped() && !ex.Pending() }) - numberOfPendingSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + numberOfPendingSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { return ex.Pending() }) - numberOfSkippedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + numberOfSkippedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { return ex.Skipped() }) - numberOfPassedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + numberOfPassedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { return ex.Passed() }) - numberOfFlakedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + numberOfFlakedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { return ex.Flaked() }) - numberOfFailedSpecs := runner.countSpecsSatisfying(func(ex *spec.Spec) bool { + numberOfFailedSpecs := runner.countSpecsThatRanSatisfying(func(ex *spec.Spec) bool { return ex.Failed() }) if 
runner.beforeSuiteNode != nil && !runner.beforeSuiteNode.Passed() && !runner.config.DryRun { + var known bool + numberOfSpecsThatWillBeRun, known = runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numberOfSpecsThatWillBeRun = runner.iterator.NumberOfSpecsPriorToIteration() + } numberOfFailedSpecs = numberOfSpecsThatWillBeRun } @@ -336,8 +370,8 @@ func (runner *SpecRunner) summary(success bool) *types.SuiteSummary { SuiteSucceeded: success, SuiteID: runner.suiteID, - NumberOfSpecsBeforeParallelization: runner.specs.NumberOfOriginalSpecs(), - NumberOfTotalSpecs: len(runner.specs.Specs()), + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: len(runner.processedSpecs), NumberOfSpecsThatWillBeRun: numberOfSpecsThatWillBeRun, NumberOfPendingSpecs: numberOfPendingSpecs, NumberOfSkippedSpecs: numberOfSkippedSpecs, @@ -346,3 +380,29 @@ func (runner *SpecRunner) summary(success bool) *types.SuiteSummary { NumberOfFlakedSpecs: numberOfFlakedSpecs, } } + +func (runner *SpecRunner) suiteWillBeginSummary() *types.SuiteSummary { + numTotal, known := runner.iterator.NumberOfSpecsToProcessIfKnown() + if !known { + numTotal = -1 + } + + numToRun, known := runner.iterator.NumberOfSpecsThatWillBeRunIfKnown() + if !known { + numToRun = -1 + } + + return &types.SuiteSummary{ + SuiteDescription: runner.description, + SuiteID: runner.suiteID, + + NumberOfSpecsBeforeParallelization: runner.iterator.NumberOfSpecsPriorToIteration(), + NumberOfTotalSpecs: numTotal, + NumberOfSpecsThatWillBeRun: numToRun, + NumberOfPendingSpecs: -1, + NumberOfSkippedSpecs: -1, + NumberOfPassedSpecs: -1, + NumberOfFailedSpecs: -1, + NumberOfFlakedSpecs: -1, + } +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/suite/suite.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/suite/suite.go index 949bd34f..698a6e56 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/suite/suite.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/suite/suite.go @@ -2,8 +2,11 @@ package suite import ( "math/rand" + "net/http" "time" + "github.com/onsi/ginkgo/internal/spec_iterator" + "github.com/onsi/ginkgo/config" "github.com/onsi/ginkgo/internal/containernode" "github.com/onsi/ginkgo/internal/failer" @@ -52,18 +55,18 @@ func (suite *Suite) Run(t ginkgoTestingT, description string, reporters []report r := rand.New(rand.NewSource(config.RandomSeed)) suite.topLevelContainer.Shuffle(r) - specs := suite.generateSpecs(description, config) - suite.runner = specrunner.New(description, suite.beforeSuiteNode, specs, suite.afterSuiteNode, reporters, writer, config) + iterator, hasProgrammaticFocus := suite.generateSpecsIterator(description, config) + suite.runner = specrunner.New(description, suite.beforeSuiteNode, iterator, suite.afterSuiteNode, reporters, writer, config) suite.running = true success := suite.runner.Run() if !success { t.Fail() } - return success, specs.HasProgrammaticFocus() + return success, hasProgrammaticFocus } -func (suite *Suite) generateSpecs(description string, config config.GinkgoConfigType) *spec.Specs { +func (suite *Suite) generateSpecsIterator(description string, config config.GinkgoConfigType) (spec_iterator.SpecIterator, bool) { specsSlice := []*spec.Spec{} suite.topLevelContainer.BackPropagateProgrammaticFocus() for _, collatedNodes := range suite.topLevelContainer.Collate() { @@ -83,10 +86,19 @@ func (suite 
*Suite) generateSpecs(description string, config config.GinkgoConfig specs.SkipMeasurements() } + var iterator spec_iterator.SpecIterator + if config.ParallelTotal > 1 { - specs.TrimForParallelization(config.ParallelTotal, config.ParallelNode) + iterator = spec_iterator.NewParallelIterator(specs.Specs(), config.SyncHost) + resp, err := http.Get(config.SyncHost + "/has-counter") + if err != nil || resp.StatusCode != http.StatusOK { + iterator = spec_iterator.NewShardedParallelIterator(specs.Specs(), config.ParallelTotal, config.ParallelNode) + } + } else { + iterator = spec_iterator.NewSerialIterator(specs.Specs()) } - return specs + + return iterator, specs.HasProgrammaticFocus() } func (suite *Suite) CurrentRunningSpecSummary() (*types.SpecSummary, bool) { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go index ac6540f0..6739c3f6 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/writer/fake_writer.go @@ -26,6 +26,11 @@ func (writer *FakeGinkgoWriter) DumpOutWithHeader(header string) { writer.EventStream = append(writer.EventStream, "DUMP_WITH_HEADER: "+header) } +func (writer *FakeGinkgoWriter) Bytes() []byte { + writer.EventStream = append(writer.EventStream, "BYTES") + return nil +} + func (writer *FakeGinkgoWriter) Write(data []byte) (n int, err error) { return 0, nil } diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/writer/writer.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/writer/writer.go index 7678fc1d..6b23b1a6 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/writer/writer.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/internal/writer/writer.go @@ -12,6 +12,7 @@ type WriterInterface interface { Truncate() DumpOut() DumpOutWithHeader(header string) + Bytes() []byte } type Writer struct { @@ -40,11 +41,11 @@ func (w *Writer) Write(b []byte) (n int, err error) { w.lock.Lock() defer w.lock.Unlock() + n, err = w.buffer.Write(b) if w.stream { return w.outWriter.Write(b) - } else { - return w.buffer.Write(b) } + return n, err } func (w *Writer) Truncate() { @@ -61,6 +62,15 @@ func (w *Writer) DumpOut() { } } +func (w *Writer) Bytes() []byte { + w.lock.Lock() + defer w.lock.Unlock() + b := w.buffer.Bytes() + copied := make([]byte, len(b)) + copy(copied, b) + return copied +} + func (w *Writer) DumpOutWithHeader(header string) { w.lock.Lock() defer w.lock.Unlock() diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go index 044d2dfd..fb82f70a 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/default_reporter.go @@ -29,9 +29,10 @@ func NewDefaultReporter(config config.DefaultReporterConfigType, stenographer st func (reporter *DefaultReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { reporter.stenographer.AnnounceSuite(summary.SuiteDescription, config.RandomSeed, config.RandomizeAllSpecs, reporter.config.Succinct) if config.ParallelTotal > 1 { - 
reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, summary.NumberOfTotalSpecs, summary.NumberOfSpecsBeforeParallelization, reporter.config.Succinct) + reporter.stenographer.AnnounceParallelRun(config.ParallelNode, config.ParallelTotal, reporter.config.Succinct) + } else { + reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) } - reporter.stenographer.AnnounceNumberOfSpecs(summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs, reporter.config.Succinct) } func (reporter *DefaultReporter) BeforeSuiteDidRun(setupSummary *types.SetupSummary) { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go index 6cfe390b..89b03513 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/junit_reporter.go @@ -11,10 +11,11 @@ package reporters import ( "encoding/xml" "fmt" - "github.com/onsi/ginkgo/config" - "github.com/onsi/ginkgo/types" "os" "strings" + + "github.com/onsi/ginkgo/config" + "github.com/onsi/ginkgo/types" ) type JUnitTestSuite struct { @@ -31,6 +32,7 @@ type JUnitTestCase struct { FailureMessage *JUnitFailureMessage `xml:"failure,omitempty"` Skipped *JUnitSkipped `xml:"skipped,omitempty"` Time float64 `xml:"time,attr"` + SystemOut string `xml:"system-out,omitempty"` } type JUnitFailureMessage struct { @@ -57,7 +59,6 @@ func NewJUnitReporter(filename string) *JUnitReporter { func (reporter *JUnitReporter) SpecSuiteWillBegin(config config.GinkgoConfigType, summary *types.SuiteSummary) { reporter.suite = JUnitTestSuite{ - Tests: summary.NumberOfSpecsThatWillBeRun, TestCases: []JUnitTestCase{}, } reporter.testSuiteName = summary.SuiteDescription @@ -89,6 +90,7 @@ func (reporter *JUnitReporter) handleSetupSummary(name string, setupSummary *typ Type: reporter.failureTypeForState(setupSummary.State), Message: failureMessage(setupSummary.Failure), } + testCase.SystemOut = setupSummary.CapturedOutput testCase.Time = setupSummary.RunTime.Seconds() reporter.suite.TestCases = append(reporter.suite.TestCases, testCase) } @@ -104,6 +106,7 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { Type: reporter.failureTypeForState(specSummary.State), Message: failureMessage(specSummary.Failure), } + testCase.SystemOut = specSummary.CapturedOutput } if specSummary.State == types.SpecStateSkipped || specSummary.State == types.SpecStatePending { testCase.Skipped = &JUnitSkipped{} @@ -113,6 +116,7 @@ func (reporter *JUnitReporter) SpecDidComplete(specSummary *types.SpecSummary) { } func (reporter *JUnitReporter) SpecSuiteDidEnd(summary *types.SuiteSummary) { + reporter.suite.Tests = summary.NumberOfSpecsThatWillBeRun reporter.suite.Time = summary.RunTime.Seconds() reporter.suite.Failures = summary.NumberOfFailedSpecs file, err := os.Create(reporter.filename) diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go index ce5433af..45b8f886 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go +++ 
b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/console_logging.go @@ -22,24 +22,24 @@ func (s *consoleStenographer) colorize(colorCode string, format string, args ... } func (s *consoleStenographer) printBanner(text string, bannerCharacter string) { - fmt.Println(text) - fmt.Println(strings.Repeat(bannerCharacter, len(text))) + fmt.Fprintln(s.w, text) + fmt.Fprintln(s.w, strings.Repeat(bannerCharacter, len(text))) } func (s *consoleStenographer) printNewLine() { - fmt.Println("") + fmt.Fprintln(s.w, "") } func (s *consoleStenographer) printDelimiter() { - fmt.Println(s.colorize(grayColor, "%s", strings.Repeat("-", 30))) + fmt.Fprintln(s.w, s.colorize(grayColor, "%s", strings.Repeat("-", 30))) } func (s *consoleStenographer) print(indentation int, format string, args ...interface{}) { - fmt.Print(s.indent(indentation, format, args...)) + fmt.Fprint(s.w, s.indent(indentation, format, args...)) } func (s *consoleStenographer) println(indentation int, format string, args ...interface{}) { - fmt.Println(s.indent(indentation, format, args...)) + fmt.Fprintln(s.w, s.indent(indentation, format, args...)) } func (s *consoleStenographer) indent(indentation int, format string, args ...interface{}) string { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go index 1ff6104c..98854e7d 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/fake_stenographer.go @@ -74,14 +74,18 @@ func (stenographer *FakeStenographer) AnnounceAggregatedParallelRun(nodes int, s stenographer.registerCall("AnnounceAggregatedParallelRun", nodes, succinct) } -func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) { - stenographer.registerCall("AnnounceParallelRun", node, nodes, specsToRun, totalSpecs, succinct) +func (stenographer *FakeStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { + stenographer.registerCall("AnnounceParallelRun", node, nodes, succinct) } func (stenographer *FakeStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) { stenographer.registerCall("AnnounceNumberOfSpecs", specsToRun, total, succinct) } +func (stenographer *FakeStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + stenographer.registerCall("AnnounceTotalNumberOfSpecs", total, succinct) +} + func (stenographer *FakeStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { stenographer.registerCall("AnnounceSpecRunCompletion", summary, succinct) } diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go index 05ab9368..fefd3e18 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/stenographer.go @@ -8,9 +8,11 @@ package stenographer import ( "fmt" + "io" "runtime" "strings" + "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable" "github.com/onsi/ginkgo/types" ) @@ -35,7 +37,8 @@ const ( type 
Stenographer interface { AnnounceSuite(description string, randomSeed int64, randomizingAll bool, succinct bool) AnnounceAggregatedParallelRun(nodes int, succinct bool) - AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) + AnnounceParallelRun(node int, nodes int, succinct bool) + AnnounceTotalNumberOfSpecs(total int, succinct bool) AnnounceNumberOfSpecs(specsToRun int, total int, succinct bool) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) @@ -69,15 +72,16 @@ func New(color bool, enableFlakes bool) Stenographer { denoter: denoter, cursorState: cursorStateTop, enableFlakes: enableFlakes, + w: colorable.NewColorableStdout(), } } type consoleStenographer struct { - color bool - denoter string - cursorState cursorStateType - // Whether to print flake counts. + color bool + denoter string + cursorState cursorStateType enableFlakes bool + w io.Writer } var alternatingColors = []string{defaultStyle, grayColor} @@ -95,17 +99,15 @@ func (s *consoleStenographer) AnnounceSuite(description string, randomSeed int64 s.printNewLine() } -func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, specsToRun int, totalSpecs int, succinct bool) { +func (s *consoleStenographer) AnnounceParallelRun(node int, nodes int, succinct bool) { if succinct { s.print(0, "- node #%d ", node) return } s.println(0, - "Parallel test node %s/%s. Assigned %s of %s specs.", + "Parallel test node %s/%s.", s.colorize(boldStyle, "%d", node), s.colorize(boldStyle, "%d", nodes), - s.colorize(boldStyle, "%d", specsToRun), - s.colorize(boldStyle, "%d", totalSpecs), ) s.printNewLine() } @@ -137,6 +139,20 @@ func (s *consoleStenographer) AnnounceNumberOfSpecs(specsToRun int, total int, s s.printNewLine() } +func (s *consoleStenographer) AnnounceTotalNumberOfSpecs(total int, succinct bool) { + if succinct { + s.print(0, "- %d specs ", total) + s.stream() + return + } + s.println(0, + "Will run %s specs", + s.colorize(boldStyle, "%d", total), + ) + + s.printNewLine() +} + func (s *consoleStenographer) AnnounceSpecRunCompletion(summary *types.SuiteSummary, succinct bool) { if succinct && summary.SuiteSucceeded { s.print(0, " %s %s ", s.colorize(greenColor, "SUCCESS!"), summary.RunTime) diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE new file mode 100644 index 00000000..91b5cef3 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md new file mode 100644 index 00000000..e84226a7 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/README.md @@ -0,0 +1,43 @@ +# go-colorable + +Colorable writer for windows. + +For example, most of logger packages doesn't show colors on windows. (I know we can do it with ansicon. But I don't want.) +This package is possible to handle escape sequence for ansi color on windows. + +## Too Bad! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/bad.png) + + +## So Good! + +![](https://raw.githubusercontent.com/mattn/go-colorable/gh-pages/good.png) + +## Usage + +```go +logrus.SetFormatter(&logrus.TextFormatter{ForceColors: true}) +logrus.SetOutput(colorable.NewColorableStdout()) + +logrus.Info("succeeded") +logrus.Warn("not correct") +logrus.Error("something error") +logrus.Fatal("panic") +``` + +You can compile above code on non-windows OSs. + +## Installation + +``` +$ go get github.com/mattn/go-colorable +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go new file mode 100644 index 00000000..52d6653b --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_others.go @@ -0,0 +1,24 @@ +// +build !windows + +package colorable + +import ( + "io" + "os" +) + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + return file +} + +func NewColorableStdout() io.Writer { + return os.Stdout +} + +func NewColorableStderr() io.Writer { + return os.Stderr +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go new file mode 100644 index 00000000..10880092 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/colorable_windows.go @@ -0,0 +1,783 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" + "math" + "os" + "strconv" + "strings" + "syscall" + "unsafe" + + "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty" +) + +const ( + foregroundBlue = 0x1 + foregroundGreen = 0x2 + foregroundRed = 0x4 + foregroundIntensity = 0x8 + foregroundMask = (foregroundRed | foregroundBlue | foregroundGreen | foregroundIntensity) + backgroundBlue = 0x10 + backgroundGreen = 0x20 + backgroundRed = 0x40 + backgroundIntensity = 0x80 + backgroundMask = (backgroundRed | backgroundBlue | backgroundGreen | backgroundIntensity) +) + +type wchar uint16 +type short int16 +type dword uint32 +type 
word uint16 + +type coord struct { + x short + y short +} + +type smallRect struct { + left short + top short + right short + bottom short +} + +type consoleScreenBufferInfo struct { + size coord + cursorPosition coord + attributes word + window smallRect + maximumWindowSize coord +} + +var ( + kernel32 = syscall.NewLazyDLL("kernel32.dll") + procGetConsoleScreenBufferInfo = kernel32.NewProc("GetConsoleScreenBufferInfo") + procSetConsoleTextAttribute = kernel32.NewProc("SetConsoleTextAttribute") + procSetConsoleCursorPosition = kernel32.NewProc("SetConsoleCursorPosition") + procFillConsoleOutputCharacter = kernel32.NewProc("FillConsoleOutputCharacterW") + procFillConsoleOutputAttribute = kernel32.NewProc("FillConsoleOutputAttribute") +) + +type Writer struct { + out io.Writer + handle syscall.Handle + lastbuf bytes.Buffer + oldattr word +} + +func NewColorable(file *os.File) io.Writer { + if file == nil { + panic("nil passed instead of *os.File to NewColorable()") + } + + if isatty.IsTerminal(file.Fd()) { + var csbi consoleScreenBufferInfo + handle := syscall.Handle(file.Fd()) + procGetConsoleScreenBufferInfo.Call(uintptr(handle), uintptr(unsafe.Pointer(&csbi))) + return &Writer{out: file, handle: handle, oldattr: csbi.attributes} + } else { + return file + } +} + +func NewColorableStdout() io.Writer { + return NewColorable(os.Stdout) +} + +func NewColorableStderr() io.Writer { + return NewColorable(os.Stderr) +} + +var color256 = map[int]int{ + 0: 0x000000, + 1: 0x800000, + 2: 0x008000, + 3: 0x808000, + 4: 0x000080, + 5: 0x800080, + 6: 0x008080, + 7: 0xc0c0c0, + 8: 0x808080, + 9: 0xff0000, + 10: 0x00ff00, + 11: 0xffff00, + 12: 0x0000ff, + 13: 0xff00ff, + 14: 0x00ffff, + 15: 0xffffff, + 16: 0x000000, + 17: 0x00005f, + 18: 0x000087, + 19: 0x0000af, + 20: 0x0000d7, + 21: 0x0000ff, + 22: 0x005f00, + 23: 0x005f5f, + 24: 0x005f87, + 25: 0x005faf, + 26: 0x005fd7, + 27: 0x005fff, + 28: 0x008700, + 29: 0x00875f, + 30: 0x008787, + 31: 0x0087af, + 32: 0x0087d7, + 33: 0x0087ff, + 34: 0x00af00, + 35: 0x00af5f, + 36: 0x00af87, + 37: 0x00afaf, + 38: 0x00afd7, + 39: 0x00afff, + 40: 0x00d700, + 41: 0x00d75f, + 42: 0x00d787, + 43: 0x00d7af, + 44: 0x00d7d7, + 45: 0x00d7ff, + 46: 0x00ff00, + 47: 0x00ff5f, + 48: 0x00ff87, + 49: 0x00ffaf, + 50: 0x00ffd7, + 51: 0x00ffff, + 52: 0x5f0000, + 53: 0x5f005f, + 54: 0x5f0087, + 55: 0x5f00af, + 56: 0x5f00d7, + 57: 0x5f00ff, + 58: 0x5f5f00, + 59: 0x5f5f5f, + 60: 0x5f5f87, + 61: 0x5f5faf, + 62: 0x5f5fd7, + 63: 0x5f5fff, + 64: 0x5f8700, + 65: 0x5f875f, + 66: 0x5f8787, + 67: 0x5f87af, + 68: 0x5f87d7, + 69: 0x5f87ff, + 70: 0x5faf00, + 71: 0x5faf5f, + 72: 0x5faf87, + 73: 0x5fafaf, + 74: 0x5fafd7, + 75: 0x5fafff, + 76: 0x5fd700, + 77: 0x5fd75f, + 78: 0x5fd787, + 79: 0x5fd7af, + 80: 0x5fd7d7, + 81: 0x5fd7ff, + 82: 0x5fff00, + 83: 0x5fff5f, + 84: 0x5fff87, + 85: 0x5fffaf, + 86: 0x5fffd7, + 87: 0x5fffff, + 88: 0x870000, + 89: 0x87005f, + 90: 0x870087, + 91: 0x8700af, + 92: 0x8700d7, + 93: 0x8700ff, + 94: 0x875f00, + 95: 0x875f5f, + 96: 0x875f87, + 97: 0x875faf, + 98: 0x875fd7, + 99: 0x875fff, + 100: 0x878700, + 101: 0x87875f, + 102: 0x878787, + 103: 0x8787af, + 104: 0x8787d7, + 105: 0x8787ff, + 106: 0x87af00, + 107: 0x87af5f, + 108: 0x87af87, + 109: 0x87afaf, + 110: 0x87afd7, + 111: 0x87afff, + 112: 0x87d700, + 113: 0x87d75f, + 114: 0x87d787, + 115: 0x87d7af, + 116: 0x87d7d7, + 117: 0x87d7ff, + 118: 0x87ff00, + 119: 0x87ff5f, + 120: 0x87ff87, + 121: 0x87ffaf, + 122: 0x87ffd7, + 123: 0x87ffff, + 124: 0xaf0000, + 125: 0xaf005f, + 126: 0xaf0087, + 127: 0xaf00af, + 128: 0xaf00d7, + 
129: 0xaf00ff, + 130: 0xaf5f00, + 131: 0xaf5f5f, + 132: 0xaf5f87, + 133: 0xaf5faf, + 134: 0xaf5fd7, + 135: 0xaf5fff, + 136: 0xaf8700, + 137: 0xaf875f, + 138: 0xaf8787, + 139: 0xaf87af, + 140: 0xaf87d7, + 141: 0xaf87ff, + 142: 0xafaf00, + 143: 0xafaf5f, + 144: 0xafaf87, + 145: 0xafafaf, + 146: 0xafafd7, + 147: 0xafafff, + 148: 0xafd700, + 149: 0xafd75f, + 150: 0xafd787, + 151: 0xafd7af, + 152: 0xafd7d7, + 153: 0xafd7ff, + 154: 0xafff00, + 155: 0xafff5f, + 156: 0xafff87, + 157: 0xafffaf, + 158: 0xafffd7, + 159: 0xafffff, + 160: 0xd70000, + 161: 0xd7005f, + 162: 0xd70087, + 163: 0xd700af, + 164: 0xd700d7, + 165: 0xd700ff, + 166: 0xd75f00, + 167: 0xd75f5f, + 168: 0xd75f87, + 169: 0xd75faf, + 170: 0xd75fd7, + 171: 0xd75fff, + 172: 0xd78700, + 173: 0xd7875f, + 174: 0xd78787, + 175: 0xd787af, + 176: 0xd787d7, + 177: 0xd787ff, + 178: 0xd7af00, + 179: 0xd7af5f, + 180: 0xd7af87, + 181: 0xd7afaf, + 182: 0xd7afd7, + 183: 0xd7afff, + 184: 0xd7d700, + 185: 0xd7d75f, + 186: 0xd7d787, + 187: 0xd7d7af, + 188: 0xd7d7d7, + 189: 0xd7d7ff, + 190: 0xd7ff00, + 191: 0xd7ff5f, + 192: 0xd7ff87, + 193: 0xd7ffaf, + 194: 0xd7ffd7, + 195: 0xd7ffff, + 196: 0xff0000, + 197: 0xff005f, + 198: 0xff0087, + 199: 0xff00af, + 200: 0xff00d7, + 201: 0xff00ff, + 202: 0xff5f00, + 203: 0xff5f5f, + 204: 0xff5f87, + 205: 0xff5faf, + 206: 0xff5fd7, + 207: 0xff5fff, + 208: 0xff8700, + 209: 0xff875f, + 210: 0xff8787, + 211: 0xff87af, + 212: 0xff87d7, + 213: 0xff87ff, + 214: 0xffaf00, + 215: 0xffaf5f, + 216: 0xffaf87, + 217: 0xffafaf, + 218: 0xffafd7, + 219: 0xffafff, + 220: 0xffd700, + 221: 0xffd75f, + 222: 0xffd787, + 223: 0xffd7af, + 224: 0xffd7d7, + 225: 0xffd7ff, + 226: 0xffff00, + 227: 0xffff5f, + 228: 0xffff87, + 229: 0xffffaf, + 230: 0xffffd7, + 231: 0xffffff, + 232: 0x080808, + 233: 0x121212, + 234: 0x1c1c1c, + 235: 0x262626, + 236: 0x303030, + 237: 0x3a3a3a, + 238: 0x444444, + 239: 0x4e4e4e, + 240: 0x585858, + 241: 0x626262, + 242: 0x6c6c6c, + 243: 0x767676, + 244: 0x808080, + 245: 0x8a8a8a, + 246: 0x949494, + 247: 0x9e9e9e, + 248: 0xa8a8a8, + 249: 0xb2b2b2, + 250: 0xbcbcbc, + 251: 0xc6c6c6, + 252: 0xd0d0d0, + 253: 0xdadada, + 254: 0xe4e4e4, + 255: 0xeeeeee, +} + +func (w *Writer) Write(data []byte) (n int, err error) { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + + er := bytes.NewBuffer(data) +loop: + for { + r1, _, err := procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + if r1 == 0 { + break loop + } + + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + var m rune + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + m = c + break + } + buf.Write([]byte(string(c))) + } + + var csbi consoleScreenBufferInfo + switch m { + case 'A': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'B': + n, err = 
strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'C': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'D': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + if n, err = strconv.Atoi(buf.String()); err == nil { + var csbi consoleScreenBufferInfo + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + } + case 'E': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y += short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'F': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = 0 + csbi.cursorPosition.y -= short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'G': + n, err = strconv.Atoi(buf.String()) + if err != nil { + continue + } + procGetConsoleScreenBufferInfo.Call(uintptr(w.handle), uintptr(unsafe.Pointer(&csbi))) + csbi.cursorPosition.x = short(n) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'H': + token := strings.Split(buf.String(), ";") + if len(token) != 2 { + continue + } + n1, err := strconv.Atoi(token[0]) + if err != nil { + continue + } + n2, err := strconv.Atoi(token[1]) + if err != nil { + continue + } + csbi.cursorPosition.x = short(n2) + csbi.cursorPosition.x = short(n1) + procSetConsoleCursorPosition.Call(uintptr(w.handle), *(*uintptr)(unsafe.Pointer(&csbi.cursorPosition))) + case 'J': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + case 2: + cursor = coord{x: csbi.window.left, y: csbi.window.top} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x + (csbi.size.y-csbi.cursorPosition.y)*csbi.size.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'K': + n, err := strconv.Atoi(buf.String()) + if err != nil { + continue + } + var cursor coord + switch n { + case 0: + cursor = coord{x: csbi.cursorPosition.x, y: csbi.cursorPosition.y} + case 1: + cursor = coord{x: csbi.window.left, y: csbi.window.top + csbi.cursorPosition.y} + case 2: + cursor = coord{x: csbi.window.left, y: 
csbi.window.top + csbi.cursorPosition.y} + } + var count, written dword + count = dword(csbi.size.x - csbi.cursorPosition.x) + procFillConsoleOutputCharacter.Call(uintptr(w.handle), uintptr(' '), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + procFillConsoleOutputAttribute.Call(uintptr(w.handle), uintptr(csbi.attributes), uintptr(count), *(*uintptr)(unsafe.Pointer(&cursor)), uintptr(unsafe.Pointer(&written))) + case 'm': + attr := csbi.attributes + cs := buf.String() + if cs == "" { + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(w.oldattr)) + continue + } + token := strings.Split(cs, ";") + for i := 0; i < len(token); i += 1 { + ns := token[i] + if n, err = strconv.Atoi(ns); err == nil { + switch { + case n == 0 || n == 100: + attr = w.oldattr + case 1 <= n && n <= 5: + attr |= foregroundIntensity + case n == 7: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 22 == n || n == 25 || n == 25: + attr |= foregroundIntensity + case n == 27: + attr = ((attr & foregroundMask) << 4) | ((attr & backgroundMask) >> 4) + case 30 <= n && n <= 37: + attr = (attr & backgroundMask) + if (n-30)&1 != 0 { + attr |= foregroundRed + } + if (n-30)&2 != 0 { + attr |= foregroundGreen + } + if (n-30)&4 != 0 { + attr |= foregroundBlue + } + case n == 38: // set foreground color. + if i < len(token)-2 && (token[i+1] == "5" || token[i+1] == "05") { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256foreAttr == nil { + n256setup() + } + attr &= backgroundMask + attr |= n256foreAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & backgroundMask) + } + case n == 39: // reset foreground color. + attr &= backgroundMask + attr |= w.oldattr & foregroundMask + case 40 <= n && n <= 47: + attr = (attr & foregroundMask) + if (n-40)&1 != 0 { + attr |= backgroundRed + } + if (n-40)&2 != 0 { + attr |= backgroundGreen + } + if (n-40)&4 != 0 { + attr |= backgroundBlue + } + case n == 48: // set background color. + if i < len(token)-2 && token[i+1] == "5" { + if n256, err := strconv.Atoi(token[i+2]); err == nil { + if n256backAttr == nil { + n256setup() + } + attr &= foregroundMask + attr |= n256backAttr[n256] + i += 2 + } + } else { + attr = attr & (w.oldattr & foregroundMask) + } + case n == 49: // reset foreground color. 
+ attr &= foregroundMask + attr |= w.oldattr & backgroundMask + case 90 <= n && n <= 97: + attr = (attr & backgroundMask) + attr |= foregroundIntensity + if (n-90)&1 != 0 { + attr |= foregroundRed + } + if (n-90)&2 != 0 { + attr |= foregroundGreen + } + if (n-90)&4 != 0 { + attr |= foregroundBlue + } + case 100 <= n && n <= 107: + attr = (attr & foregroundMask) + attr |= backgroundIntensity + if (n-100)&1 != 0 { + attr |= backgroundRed + } + if (n-100)&2 != 0 { + attr |= backgroundGreen + } + if (n-100)&4 != 0 { + attr |= backgroundBlue + } + } + procSetConsoleTextAttribute.Call(uintptr(w.handle), uintptr(attr)) + } + } + } + } + return len(data) - w.lastbuf.Len(), nil +} + +type consoleColor struct { + rgb int + red bool + green bool + blue bool + intensity bool +} + +func (c consoleColor) foregroundAttr() (attr word) { + if c.red { + attr |= foregroundRed + } + if c.green { + attr |= foregroundGreen + } + if c.blue { + attr |= foregroundBlue + } + if c.intensity { + attr |= foregroundIntensity + } + return +} + +func (c consoleColor) backgroundAttr() (attr word) { + if c.red { + attr |= backgroundRed + } + if c.green { + attr |= backgroundGreen + } + if c.blue { + attr |= backgroundBlue + } + if c.intensity { + attr |= backgroundIntensity + } + return +} + +var color16 = []consoleColor{ + consoleColor{0x000000, false, false, false, false}, + consoleColor{0x000080, false, false, true, false}, + consoleColor{0x008000, false, true, false, false}, + consoleColor{0x008080, false, true, true, false}, + consoleColor{0x800000, true, false, false, false}, + consoleColor{0x800080, true, false, true, false}, + consoleColor{0x808000, true, true, false, false}, + consoleColor{0xc0c0c0, true, true, true, false}, + consoleColor{0x808080, false, false, false, true}, + consoleColor{0x0000ff, false, false, true, true}, + consoleColor{0x00ff00, false, true, false, true}, + consoleColor{0x00ffff, false, true, true, true}, + consoleColor{0xff0000, true, false, false, true}, + consoleColor{0xff00ff, true, false, true, true}, + consoleColor{0xffff00, true, true, false, true}, + consoleColor{0xffffff, true, true, true, true}, +} + +type hsv struct { + h, s, v float32 +} + +func (a hsv) dist(b hsv) float32 { + dh := a.h - b.h + switch { + case dh > 0.5: + dh = 1 - dh + case dh < -0.5: + dh = -1 - dh + } + ds := a.s - b.s + dv := a.v - b.v + return float32(math.Sqrt(float64(dh*dh + ds*ds + dv*dv))) +} + +func toHSV(rgb int) hsv { + r, g, b := float32((rgb&0xFF0000)>>16)/256.0, + float32((rgb&0x00FF00)>>8)/256.0, + float32(rgb&0x0000FF)/256.0 + min, max := minmax3f(r, g, b) + h := max - min + if h > 0 { + if max == r { + h = (g - b) / h + if h < 0 { + h += 6 + } + } else if max == g { + h = 2 + (b-r)/h + } else { + h = 4 + (r-g)/h + } + } + h /= 6.0 + s := max - min + if max != 0 { + s /= max + } + v := max + return hsv{h: h, s: s, v: v} +} + +type hsvTable []hsv + +func toHSVTable(rgbTable []consoleColor) hsvTable { + t := make(hsvTable, len(rgbTable)) + for i, c := range rgbTable { + t[i] = toHSV(c.rgb) + } + return t +} + +func (t hsvTable) find(rgb int) consoleColor { + hsv := toHSV(rgb) + n := 7 + l := float32(5.0) + for i, p := range t { + d := hsv.dist(p) + if d < l { + l, n = d, i + } + } + return color16[n] +} + +func minmax3f(a, b, c float32) (min, max float32) { + if a < b { + if b < c { + return a, c + } else if a < c { + return a, b + } else { + return c, b + } + } else { + if a < c { + return b, c + } else if b < c { + return b, a + } else { + return c, a + } + } +} + +var n256foreAttr []word +var 
n256backAttr []word + +func n256setup() { + n256foreAttr = make([]word, 256) + n256backAttr = make([]word, 256) + t := toHSVTable(color16) + for i, rgb := range color256 { + c := t.find(rgb) + n256foreAttr[i] = c.foregroundAttr() + n256backAttr[i] = c.backgroundAttr() + } +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go new file mode 100644 index 00000000..fb976dbd --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable/noncolorable.go @@ -0,0 +1,57 @@ +package colorable + +import ( + "bytes" + "fmt" + "io" +) + +type NonColorable struct { + out io.Writer + lastbuf bytes.Buffer +} + +func NewNonColorable(w io.Writer) io.Writer { + return &NonColorable{out: w} +} + +func (w *NonColorable) Write(data []byte) (n int, err error) { + er := bytes.NewBuffer(data) +loop: + for { + c1, _, err := er.ReadRune() + if err != nil { + break loop + } + if c1 != 0x1b { + fmt.Fprint(w.out, string(c1)) + continue + } + c2, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + break loop + } + if c2 != 0x5b { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + continue + } + + var buf bytes.Buffer + for { + c, _, err := er.ReadRune() + if err != nil { + w.lastbuf.WriteRune(c1) + w.lastbuf.WriteRune(c2) + w.lastbuf.Write(buf.Bytes()) + break loop + } + if ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') || c == '@' { + break + } + buf.Write([]byte(string(c))) + } + } + return len(data) - w.lastbuf.Len(), nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE new file mode 100644 index 00000000..65dc692b --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/LICENSE @@ -0,0 +1,9 @@ +Copyright (c) Yasuhiro MATSUMOTO + +MIT License (Expat) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
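
For context on the two packages vendored above, here is a minimal sketch of how go-colorable and go-isatty are typically combined, mirroring the stenographer's new `w: colorable.NewColorableStdout()` writer introduced earlier in this diff; the escape sequence and the fallback to `NewNonColorable` below are illustrative only, not part of this change:

```go
package main

import (
	"fmt"
	"os"

	colorable "github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable"
	isatty "github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty"
)

func main() {
	// On non-Windows platforms NewColorableStdout() simply returns os.Stdout;
	// on a Windows console it returns a writer that translates ANSI escapes
	// into SetConsoleTextAttribute calls.
	var out = colorable.NewColorableStdout()

	// If stdout is not a terminal (e.g. redirected to a file), strip the
	// escape sequences instead of writing them through verbatim.
	if !isatty.IsTerminal(os.Stdout.Fd()) {
		out = colorable.NewNonColorable(os.Stdout)
	}

	fmt.Fprintln(out, "\x1b[32mgreen on a color terminal, plain otherwise\x1b[0m")
}
```

On non-Windows builds `NewColorableStdout()` is a pass-through (see colorable_others.go above), so the stenographer's output path is unchanged outside Windows consoles.
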
diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md new file mode 100644 index 00000000..74845de4 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/README.md @@ -0,0 +1,37 @@ +# go-isatty + +isatty for golang + +## Usage + +```go +package main + +import ( + "fmt" + "github.com/mattn/go-isatty" + "os" +) + +func main() { + if isatty.IsTerminal(os.Stdout.Fd()) { + fmt.Println("Is Terminal") + } else { + fmt.Println("Is Not Terminal") + } +} +``` + +## Installation + +``` +$ go get github.com/mattn/go-isatty +``` + +# License + +MIT + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go new file mode 100644 index 00000000..17d4f90e --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/doc.go @@ -0,0 +1,2 @@ +// Package isatty implements interface to isatty +package isatty diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go new file mode 100644 index 00000000..83c58877 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_appengine.go @@ -0,0 +1,9 @@ +// +build appengine + +package isatty + +// IsTerminal returns true if the file descriptor is terminal which +// is always false on on appengine classic which is a sandboxed PaaS. +func IsTerminal(fd uintptr) bool { + return false +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go new file mode 100644 index 00000000..98ffe86a --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_bsd.go @@ -0,0 +1,18 @@ +// +build darwin freebsd openbsd netbsd +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TIOCGETA + +// IsTerminal return true if the file descriptor is terminal. 
+func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go new file mode 100644 index 00000000..9d24bac1 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_linux.go @@ -0,0 +1,18 @@ +// +build linux +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +const ioctlReadTermios = syscall.TCGETS + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var termios syscall.Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, fd, ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go new file mode 100644 index 00000000..1f0c6bf5 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_solaris.go @@ -0,0 +1,16 @@ +// +build solaris +// +build !appengine + +package isatty + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +// see: http://src.illumos.org/source/xref/illumos-gate/usr/src/lib/libbc/libc/gen/common/isatty.c +func IsTerminal(fd uintptr) bool { + var termio unix.Termio + err := unix.IoctlSetTermio(int(fd), unix.TCGETA, &termio) + return err == nil +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go new file mode 100644 index 00000000..83c398b1 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty/isatty_windows.go @@ -0,0 +1,19 @@ +// +build windows +// +build !appengine + +package isatty + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") +var procGetConsoleMode = kernel32.NewProc("GetConsoleMode") + +// IsTerminal return true if the file descriptor is terminal. +func IsTerminal(fd uintptr) bool { + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, fd, uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/types/types.go b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/types/types.go index dcf95ba5..baf1bd1c 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/types/types.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/ginkgo/types/types.go @@ -7,6 +7,19 @@ import ( const GINKGO_FOCUS_EXIT_CODE = 197 +/* +SuiteSummary represents the a summary of the test suite and is passed to both +Reporter.SpecSuiteWillBegin +Reporter.SpecSuiteDidEnd + +this is unfortunate as these two methods should receive different objects. 
When running in parallel +each node does not deterministically know how many specs it will end up running. + +Unfortunately making such a change would break backward compatibility. + +Until Ginkgo 2.0 comes out we will continue to reuse this struct but populate unkown fields +with -1. +*/ type SuiteSummary struct { SuiteDescription string SuiteSucceeded bool diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/CHANGELOG.md b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/CHANGELOG.md index 0c5ede5d..a3e8ee44 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,5 +1,7 @@ ## HEAD +## 1.2.0 + Improvements: - Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout. @@ -14,6 +16,8 @@ Improvements: - `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives. - Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher - Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers +- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the alloted time. +- Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer. Bug Fixes: - gexec: `session.Wait` now uses `EventuallyWithOffset` to get the right line number in the failure. diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/CONTRIBUTING.md b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/CONTRIBUTING.md new file mode 100644 index 00000000..73d4020e --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/CONTRIBUTING.md @@ -0,0 +1,11 @@ +# Contributing to Gomega + +Your contributions to Gomega are essential for its long-term maintenance and improvement. To make a contribution: + +- Please **open an issue first** - describe what problem you are trying to solve and give the community a forum for input and feedback ahead of investing time in writing code! +- Ensure adequate test coverage: + - Make sure to add appropriate unit tests + - Please run all tests locally (`ginkgo -r -p`) and make sure they go green before submitting the PR +- Update the documentation. In addition to standard `godoc` comments Gomega has extensive documentation on the `gh-pages` branch. If relevant, please submit a docs PR to that branch alongside your code PR. + +Thanks for supporting Gomega! 
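
The "-1 means unknown" convention documented in the types.go hunk above has a practical consequence for custom reporters: with the iterator-based runner, `SpecSuiteWillBegin` may receive a summary whose per-node totals are not yet known. A minimal defensive sketch follows; the `exampleReporter` type is hypothetical and only this one method is shown:

```go
package example

import (
	"fmt"

	"github.com/onsi/ginkgo/config"
	"github.com/onsi/ginkgo/types"
)

// exampleReporter is a hypothetical custom reporter; only SpecSuiteWillBegin
// is sketched to show handling of summary fields that may now be -1.
type exampleReporter struct{}

func (r *exampleReporter) SpecSuiteWillBegin(cfg config.GinkgoConfigType, summary *types.SuiteSummary) {
	if summary.NumberOfTotalSpecs >= 0 {
		// Serial run, or a parallel run where the iterator could report totals.
		fmt.Printf("Running %d of %d specs\n",
			summary.NumberOfSpecsThatWillBeRun, summary.NumberOfTotalSpecs)
		return
	}
	// Parallel run with a streaming iterator: per-node totals are unknown up
	// front, so fall back to the pre-parallelization spec count.
	fmt.Printf("Running an unknown subset of %d specs\n",
		summary.NumberOfSpecsBeforeParallelization)
}
```

This mirrors what the updated DefaultReporter in this diff does: it announces spec totals only for serial runs and announces just the node count when running in parallel.
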
\ No newline at end of file diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/README.md b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/README.md index d1add5ba..159be359 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/README.md +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/README.md @@ -1,10 +1,10 @@ ![Gomega: Ginkgo's Preferred Matcher Library](http://onsi.github.io/gomega/images/gomega.png) -[![Build Status](https://travis-ci.org/onsi/gomega.png)](https://travis-ci.org/onsi/gomega) +[![Build Status](https://travis-ci.org/onsi/gomega.svg)](https://travis-ci.org/onsi/gomega) Jump straight to the [docs](http://onsi.github.io/gomega/) to learn about Gomega, including a list of [all available matchers](http://onsi.github.io/gomega/#provided-matchers). -To discuss Gomega and get updates, join the [google group](https://groups.google.com/d/forum/ginkgo-and-gomega). +If you have a question, comment, bug report, feature request, etc. please open a GitHub issue. ## [Ginkgo](http://github.com/onsi/ginkgo): a BDD Testing Framework for Golang diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/format/format.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/format/format.go index 06355d94..e206ee59 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/format/format.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/format/format.go @@ -6,8 +6,9 @@ package format import ( "fmt" "reflect" - "strings" "strconv" + "strings" + "time" ) // Use MaxDepth to set the maximum recursion depth when printing deeply nested objects @@ -22,6 +23,25 @@ Note that GoString and String don't always have all the information you need to */ var UseStringerRepresentation = false +/* +Print the content of context objects. By default it will be suppressed. + +Set PrintContextObjects = true to enable printing of the context internals. +*/ +var PrintContextObjects = false + +// Ctx interface defined here to keep backwards compatability with go < 1.7 +// It matches the context.Context interface +type Ctx interface { + Deadline() (deadline time.Time, ok bool) + Done() <-chan struct{} + Err() error + Value(key interface{}) interface{} +} + +var contextType = reflect.TypeOf((*Ctx)(nil)).Elem() +var timeType = reflect.TypeOf(time.Time{}) + //The default indentation string emitted by the format package var Indent = " " @@ -44,11 +64,85 @@ If expected is omited, then the message looks like: func Message(actual interface{}, message string, expected ...interface{}) string { if len(expected) == 0 { return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message) - } else { - return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1)) } + return fmt.Sprintf("Expected\n%s\n%s\n%s", Object(actual, 1), message, Object(expected[0], 1)) } +/* + +Generates a nicely formatted matcher success / failure message + +Much like Message(...), but it attempts to pretty print diffs in strings + +Expected + : "...aaaaabaaaaa..." +to equal | + : "...aaaaazaaaaa..." 
+ +*/ + +func MessageWithDiff(actual, message, expected string) string { + if len(actual) >= truncateThreshold && len(expected) >= truncateThreshold { + diffPoint := findFirstMismatch(actual, expected) + formattedActual := truncateAndFormat(actual, diffPoint) + formattedExpected := truncateAndFormat(expected, diffPoint) + + spacesBeforeFormattedMismatch := findFirstMismatch(formattedActual, formattedExpected) + + tabLength := 4 + spaceFromMessageToActual := tabLength + len(": ") - len(message) + padding := strings.Repeat(" ", spaceFromMessageToActual+spacesBeforeFormattedMismatch) + "|" + return Message(formattedActual, message+padding, formattedExpected) + } + return Message(actual, message, expected) +} + +func truncateAndFormat(str string, index int) string { + leftPadding := `...` + rightPadding := `...` + + start := index - charactersAroundMismatchToInclude + if start < 0 { + start = 0 + leftPadding = "" + } + + // slice index must include the mis-matched character + lengthOfMismatchedCharacter := 1 + end := index + charactersAroundMismatchToInclude + lengthOfMismatchedCharacter + if end > len(str) { + end = len(str) + rightPadding = "" + + } + return fmt.Sprintf("\"%s\"", leftPadding+str[start:end]+rightPadding) +} + +func findFirstMismatch(a, b string) int { + aSlice := strings.Split(a, "") + bSlice := strings.Split(b, "") + + for index, str := range aSlice { + if index > len(b) - 1 { + return index + } + if str != bSlice[index] { + return index + } + } + + if len(b) > len(a) { + return len(a) + 1 + } + + return 0 +} + +const ( + truncateThreshold = 50 + charactersAroundMismatchToInclude = 5 +) + /* Pretty prints the passed in object at the passed in indentation level. @@ -57,6 +151,8 @@ Object recurses into deeply nested objects emitting pretty-printed representatio Modify format.MaxDepth to control how deep the recursion is allowed to go Set format.UseStringerRepresentation to true to return object.GoString() or object.String() when available instead of recursing into the object. 
+ +Set PrintContextObjects to true to print the content of objects implementing context.Context */ func Object(object interface{}, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) @@ -124,6 +220,12 @@ func formatValue(value reflect.Value, indentation uint) string { } } + if !PrintContextObjects { + if value.Type().Implements(contextType) && indentation > 1 { + return "" + } + } + switch value.Kind() { case reflect.Bool: return fmt.Sprintf("%v", value.Bool()) @@ -152,15 +254,18 @@ func formatValue(value reflect.Value, indentation uint) string { case reflect.Map: return formatMap(value, indentation) case reflect.Struct: + if value.Type() == timeType && value.CanInterface() { + t, _ := value.Interface().(time.Time) + return t.Format(time.RFC3339Nano) + } return formatStruct(value, indentation) case reflect.Interface: return formatValue(value.Elem(), indentation) default: if value.CanInterface() { return fmt.Sprintf("%#v", value.Interface()) - } else { - return fmt.Sprintf("%#v", value) } + return fmt.Sprintf("%#v", value) } } @@ -187,7 +292,7 @@ func formatString(object interface{}, indentation uint) string { } func formatSlice(v reflect.Value, indentation uint) string { - if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())){ + if v.Kind() == reflect.Slice && v.Type().Elem().Kind() == reflect.Uint8 && isPrintableString(string(v.Bytes())) { return formatString(v.Bytes(), indentation) } @@ -204,9 +309,8 @@ func formatSlice(v reflect.Value, indentation uint) string { if longest > longFormThreshold { indenter := strings.Repeat(Indent, int(indentation)) return fmt.Sprintf("[\n%s%s,\n%s]", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) - } else { - return fmt.Sprintf("[%s]", strings.Join(result, ", ")) } + return fmt.Sprintf("[%s]", strings.Join(result, ", ")) } func formatMap(v reflect.Value, indentation uint) string { @@ -216,7 +320,7 @@ func formatMap(v reflect.Value, indentation uint) string { longest := 0 for i, key := range v.MapKeys() { value := v.MapIndex(key) - result[i] = fmt.Sprintf("%s: %s", formatValue(key, 0), formatValue(value, indentation+1)) + result[i] = fmt.Sprintf("%s: %s", formatValue(key, indentation+1), formatValue(value, indentation+1)) if len(result[i]) > longest { longest = len(result[i]) } @@ -225,9 +329,8 @@ func formatMap(v reflect.Value, indentation uint) string { if longest > longFormThreshold { indenter := strings.Repeat(Indent, int(indentation)) return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) - } else { - return fmt.Sprintf("{%s}", strings.Join(result, ", ")) } + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) } func formatStruct(v reflect.Value, indentation uint) string { @@ -248,9 +351,8 @@ func formatStruct(v reflect.Value, indentation uint) string { if longest > longFormThreshold { indenter := strings.Repeat(Indent, int(indentation)) return fmt.Sprintf("{\n%s%s,\n%s}", indenter+Indent, strings.Join(result, ",\n"+indenter+Indent), indenter) - } else { - return fmt.Sprintf("{%s}", strings.Join(result, ", ")) } + return fmt.Sprintf("{%s}", strings.Join(result, ", ")) } func isNilValue(a reflect.Value) bool { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/gomega_dsl.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/gomega_dsl.go index 78bd188c..0d0f563a 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/gomega_dsl.go +++ 
b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -24,7 +24,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.0" +const GOMEGA_VERSION = "1.2.0" const nilFailHandlerPanic = `You are trying to make an assertion, but Gomega's fail handler is nil. If you're using Ginkgo then you probably forgot to put your assertion in an It(). diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go index 7871fd43..ac891252 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/internal/testingtsupport/testing_t_support.go @@ -9,7 +9,7 @@ import ( ) type gomegaTestingT interface { - Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) } func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler { @@ -19,7 +19,7 @@ func BuildTestingTGomegaFailHandler(t gomegaTestingT) types.GomegaFailHandler { skip = callerSkip[0] } stackTrace := pruneStack(string(debug.Stack()), skip) - t.Errorf("\n%s\n%s", stackTrace, message) + t.Fatalf("\n%s\n%s", stackTrace, message) } } diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers.go index 0c30aa1c..e6e85d07 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers.go @@ -176,7 +176,7 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { } //ContainSubstring succeeds if actual is a string or stringer that contains the -//passed-in regexp. Optional arguments can be provided to construct the substring +//passed-in substring. Optional arguments can be provided to construct the substring //via fmt.Sprintf(). func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { return &matchers.ContainSubstringMatcher{ @@ -214,6 +214,15 @@ func MatchJSON(json interface{}) types.GomegaMatcher { } } +//MatchXML succeeds if actual is a string or stringer of XML that matches +//the expected XML. The XMLs are decoded and the resulting objects are compared via +//reflect.DeepEqual so things like whitespaces shouldn't matter. +func MatchXML(xml interface{}) types.GomegaMatcher { + return &matchers.MatchXMLMatcher{ + XMLToMatch: xml, + } +} + //MatchYAML succeeds if actual is a string or stringer of YAML that matches //the expected YAML. The YAML's are decoded and the resulting objects are compared via //reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/and.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/and.go index 94c42a7d..d83a2916 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/and.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/and.go @@ -57,8 +57,7 @@ func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { } } return false // none of were going to change - } else { - // one of the matchers failed.. 
it must be able to change in order to affect the result - return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) } + // one of the matchers failed.. it must be able to change in order to affect the result + return oraclematcher.MatchMayChangeInTheFuture(m.firstFailedMatcher, actual) } diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go index 52f83fe3..0c157f61 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -2,8 +2,9 @@ package matchers import ( "fmt" - "github.com/onsi/gomega/format" "math" + + "github.com/onsi/gomega/format" ) type BeNumericallyMatcher struct { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/equal_matcher.go index d1865973..befb7bdf 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/equal_matcher.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -1,6 +1,7 @@ package matchers import ( + "bytes" "fmt" "reflect" @@ -15,10 +16,24 @@ func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } + // Shortcut for byte slices. + // Comparing long byte slices with reflect.DeepEqual is very slow, + // so use bytes.Equal if actual and expected are both byte slices. 
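// Illustrative sketch (assumption: canonical upstream import path
// github.com/onsi/gomega; the test below is hypothetical and not part of this
// diff). It exercises the []byte shortcut described above: when both the
// actual and expected values are byte slices, Equal compares them with
// bytes.Equal instead of reflect.DeepEqual.
package equal_example_test

import (
	"bytes"
	"testing"

	. "github.com/onsi/gomega"
)

func TestEqualByteSliceFastPath(t *testing.T) {
	RegisterTestingT(t)

	// Two independently built 1 MiB payloads; both sides are []byte, so the
	// matcher takes the bytes.Equal shortcut rather than walking the slices
	// reflectively.
	payload := bytes.Repeat([]byte{0xAB}, 1<<20)
	Expect(payload).To(Equal(bytes.Repeat([]byte{0xAB}, 1<<20)))
}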
+ if actualByteSlice, ok := actual.([]byte); ok { + if expectedByteSlice, ok := matcher.Expected.([]byte); ok { + return bytes.Equal(actualByteSlice, expectedByteSlice), nil + } + } return reflect.DeepEqual(actual, matcher.Expected), nil } func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { + actualString, actualOK := actual.(string) + expectedString, expectedOK := matcher.Expected.(string) + if actualOK && expectedOK { + return format.MessageWithDiff(actualString, "to equal", expectedString) + } + return format.Message(actual, "to equal", matcher.Expected) } diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go index e61978a1..499bb583 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -5,12 +5,14 @@ import ( "encoding/json" "fmt" "reflect" + "strings" "github.com/onsi/gomega/format" ) type MatchJSONMatcher struct { - JSONToMatch interface{} + JSONToMatch interface{} + firstFailurePath []interface{} } func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { @@ -25,18 +27,45 @@ func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err er // this is guarded by prettyPrint json.Unmarshal([]byte(actualString), &aval) json.Unmarshal([]byte(expectedString), &eval) - - return reflect.DeepEqual(aval, eval), nil + var equal bool + equal, matcher.firstFailurePath = deepEqual(aval, eval) + return equal, nil } func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { actualString, expectedString, _ := matcher.prettyPrint(actual) - return format.Message(actualString, "to match JSON of", expectedString) + return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath) } func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { actualString, expectedString, _ := matcher.prettyPrint(actual) - return format.Message(actualString, "not to match JSON of", expectedString) + return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath) +} + +func formattedMessage(comparisonMessage string, failurePath []interface{}) string { + var diffMessage string + if len(failurePath) == 0 { + diffMessage = "" + } else { + diffMessage = fmt.Sprintf("\n\nfirst mismatched key: %s", formattedFailurePath(failurePath)) + } + return fmt.Sprintf("%s%s", comparisonMessage, diffMessage) +} + +func formattedFailurePath(failurePath []interface{}) string { + formattedPaths := []string{} + for i := len(failurePath) - 1; i >= 0; i-- { + switch p := failurePath[i].(type) { + case int: + formattedPaths = append(formattedPaths, fmt.Sprintf(`[%d]`, p)) + default: + if i != len(failurePath)-1 { + formattedPaths = append(formattedPaths, ".") + } + formattedPaths = append(formattedPaths, fmt.Sprintf(`"%s"`, p)) + } + } + return strings.Join(formattedPaths, "") } func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) { @@ -62,3 +91,45 @@ func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatte return abuf.String(), ebuf.String(), nil } + +func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { 
+ var errorPath []interface{} + if reflect.TypeOf(a) != reflect.TypeOf(b) { + return false, errorPath + } + + switch a.(type) { + case []interface{}: + if len(a.([]interface{})) != len(b.([]interface{})) { + return false, errorPath + } + + for i, v := range a.([]interface{}) { + elementEqual, keyPath := deepEqual(v, b.([]interface{})[i]) + if !elementEqual { + return false, append(keyPath, i) + } + } + return true, errorPath + + case map[string]interface{}: + if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) { + return false, errorPath + } + + for k, v1 := range a.(map[string]interface{}) { + v2, ok := b.(map[string]interface{})[k] + if !ok { + return false, errorPath + } + elementEqual, keyPath := deepEqual(v1, v2) + if !elementEqual { + return false, append(keyPath, k) + } + } + return true, errorPath + + default: + return a == b, errorPath + } +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go new file mode 100644 index 00000000..da265629 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go @@ -0,0 +1,131 @@ +package matchers + +import ( + "bytes" + "encoding/xml" + "errors" + "fmt" + "io" + "reflect" + "strings" + + "github.com/onsi/gomega/format" + "golang.org/x/net/html/charset" +) + +type MatchXMLMatcher struct { + XMLToMatch interface{} +} + +func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) { + actualString, expectedString, err := matcher.formattedPrint(actual) + if err != nil { + return false, err + } + + aval, err := parseXmlContent(actualString) + if err != nil { + return false, fmt.Errorf("Actual '%s' should be valid XML, but it is not.\nUnderlying error:%s", actualString, err) + } + + eval, err := parseXmlContent(expectedString) + if err != nil { + return false, fmt.Errorf("Expected '%s' should be valid XML, but it is not.\nUnderlying error:%s", expectedString, err) + } + + return reflect.DeepEqual(aval, eval), nil +} + +func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { + actualString, expectedString, _ := matcher.formattedPrint(actual) + return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString) +} + +func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) { + var ok bool + actualString, ok = toString(actual) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) + } + expectedString, ok = toString(matcher.XMLToMatch) + if !ok { + return "", "", fmt.Errorf("MatchXMLMatcher matcher requires a string, stringer, or []byte. 
Got expected:\n%s", format.Object(matcher.XMLToMatch, 1)) + } + return actualString, expectedString, nil +} + +func parseXmlContent(content string) (*xmlNode, error) { + allNodes := []*xmlNode{} + + dec := newXmlDecoder(strings.NewReader(content)) + for { + tok, err := dec.Token() + if err != nil { + if err == io.EOF { + break + } + return nil, fmt.Errorf("failed to decode next token: %v", err) + } + + lastNodeIndex := len(allNodes) - 1 + var lastNode *xmlNode + if len(allNodes) > 0 { + lastNode = allNodes[lastNodeIndex] + } else { + lastNode = &xmlNode{} + } + + switch tok := tok.(type) { + case xml.StartElement: + allNodes = append(allNodes, &xmlNode{XMLName: tok.Name, XMLAttr: tok.Attr}) + case xml.EndElement: + if len(allNodes) > 1 { + allNodes[lastNodeIndex-1].Nodes = append(allNodes[lastNodeIndex-1].Nodes, lastNode) + allNodes = allNodes[:lastNodeIndex] + } + case xml.CharData: + lastNode.Content = append(lastNode.Content, tok.Copy()...) + case xml.Comment: + lastNode.Comments = append(lastNode.Comments, tok.Copy()) + case xml.ProcInst: + lastNode.ProcInsts = append(lastNode.ProcInsts, tok.Copy()) + } + } + + if len(allNodes) == 0 { + return nil, errors.New("found no nodes") + } + firstNode := allNodes[0] + trimParentNodesContentSpaces(firstNode) + + return firstNode, nil +} + +func newXmlDecoder(reader io.Reader) *xml.Decoder { + dec := xml.NewDecoder(reader) + dec.CharsetReader = charset.NewReaderLabel + return dec +} + +func trimParentNodesContentSpaces(node *xmlNode) { + if len(node.Nodes) > 0 { + node.Content = bytes.TrimSpace(node.Content) + for _, childNode := range node.Nodes { + trimParentNodesContentSpaces(childNode) + } + } +} + +type xmlNode struct { + XMLName xml.Name + Comments []xml.Comment + ProcInsts []xml.ProcInst + XMLAttr []xml.Attr + Content []byte + Nodes []*xmlNode +} diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/panic_matcher.go index 75ab251b..640f4db1 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/panic_matcher.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -2,11 +2,14 @@ package matchers import ( "fmt" - "github.com/onsi/gomega/format" "reflect" + + "github.com/onsi/gomega/format" ) -type PanicMatcher struct{} +type PanicMatcher struct { + object interface{} +} func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { if actual == nil { @@ -24,6 +27,7 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) success = false defer func() { if e := recover(); e != nil { + matcher.object = e success = true } }() @@ -38,5 +42,5 @@ func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) } func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { - return format.Message(actual, "not to panic") + return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) } diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 7a8c2cda..74e9e7eb 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -64,9 
+64,8 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro if didReceive { matcher.receivedValue = value return subMatcher.Match(matcher.receivedValue.Interface()) - } else { - return false, nil } + return false, nil } if didReceive { @@ -76,9 +75,8 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro } return true, nil - } else { - return false, nil } + return false, nil } func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { @@ -94,9 +92,8 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin return subMatcher.FailureMessage(matcher.receivedValue.Interface()) } return "When passed a matcher, ReceiveMatcher's channel *must* receive something." - } else { - return format.Message(actual, "to receive something."+closedAddendum) } + return format.Message(actual, "to receive something."+closedAddendum) } func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { @@ -112,9 +109,8 @@ func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (messag return subMatcher.NegatedFailureMessage(matcher.receivedValue.Interface()) } return "When passed a matcher, ReceiveMatcher's channel *must* receive something." - } else { - return format.Message(actual, "not to receive anything."+closedAddendum) } + return format.Message(actual, "not to receive anything."+closedAddendum) } func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go index 32529c51..8181f43a 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraphmatching.go @@ -101,9 +101,8 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ if len(currentLayer) == 0 { return []NodeOrderedSet{} - } else { - guideLayers = append(guideLayers, currentLayer) } + guideLayers = append(guideLayers, currentLayer) done := false @@ -152,9 +151,8 @@ func (bg *BipartiteGraph) createSLAPGuideLayers(matching EdgeSet) (guideLayers [ if len(currentLayer) == 0 { return []NodeOrderedSet{} - } else { - guideLayers = append(guideLayers, currentLayer) } + guideLayers = append(guideLayers, currentLayer) } return diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/type_support.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/type_support.go index 04020f00..b05a5e75 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/type_support.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -53,9 +53,8 @@ func toInteger(a interface{}) int64 { return int64(reflect.ValueOf(a).Uint()) } else if isFloat(a) { return int64(reflect.ValueOf(a).Float()) - } else { - panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } + panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) } func toUnsignedInteger(a interface{}) uint64 { @@ -65,9 +64,8 @@ func toUnsignedInteger(a interface{}) uint64 { return reflect.ValueOf(a).Uint() } else if isFloat(a) { return uint64(reflect.ValueOf(a).Float()) - } else { - panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } func toFloat(a interface{}) float64 { @@ -77,9 +75,8 @@ func toFloat(a interface{}) float64 { return float64(reflect.ValueOf(a).Uint()) } else if isFloat(a) { return reflect.ValueOf(a).Float() - } else { - panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } + panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } func isError(a interface{}) bool { diff --git a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/types/types.go b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/types/types.go index 1c632ade..a83b4011 100644 --- a/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/types/types.go +++ b/src/stackdriver-nozzle/vendor/github.com/onsi/gomega/types/types.go @@ -4,7 +4,7 @@ type GomegaFailHandler func(message string, callerSkip ...int) //A simple *testing.T interface wrapper type GomegaTestingT interface { - Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) } //All Gomega matchers must implement the GomegaMatcher interface diff --git a/src/stackdriver-nozzle/vendor/github.com/pkg/errors/stack.go b/src/stackdriver-nozzle/vendor/github.com/pkg/errors/stack.go index 6b1f2891..cbe3f3e3 100644 --- a/src/stackdriver-nozzle/vendor/github.com/pkg/errors/stack.go +++ b/src/stackdriver-nozzle/vendor/github.com/pkg/errors/stack.go @@ -79,6 +79,14 @@ func (f Frame) Format(s fmt.State, verb rune) { // StackTrace is stack of Frames from innermost (newest) to outermost (oldest). type StackTrace []Frame +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. func (st StackTrace) Format(s fmt.State, verb rune) { switch verb { case 'v': diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/context/context.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/context.go index 134654cf..d3681ab4 100644 --- a/src/stackdriver-nozzle/vendor/golang.org/x/net/context/context.go +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/context.go @@ -7,7 +7,7 @@ // and between processes. // // Incoming requests to a server should create a Context, and outgoing calls to -// servers should accept a Context. The chain of function calls between must +// servers should accept a Context. The chain of function calls between must // propagate the Context, optionally replacing it with a modified copy created // using WithDeadline, WithTimeout, WithCancel, or WithValue. // @@ -16,14 +16,14 @@ // propagation: // // Do not store Contexts inside a struct type; instead, pass a Context -// explicitly to each function that needs it. The Context should be the first +// explicitly to each function that needs it. The Context should be the first // parameter, typically named ctx: // // func DoSomething(ctx context.Context, arg Arg) error { // // ... use ctx ... 
// } // -// Do not pass a nil Context, even if a function permits it. Pass context.TODO +// Do not pass a nil Context, even if a function permits it. Pass context.TODO // if you are unsure about which Context to use. // // Use context Values only for request-scoped data that transits processes and @@ -36,112 +36,15 @@ // Contexts. package context // import "golang.org/x/net/context" -import "time" - -// A Context carries a deadline, a cancelation signal, and other values across -// API boundaries. -// -// Context's methods may be called by multiple goroutines simultaneously. -type Context interface { - // Deadline returns the time when work done on behalf of this context - // should be canceled. Deadline returns ok==false when no deadline is - // set. Successive calls to Deadline return the same results. - Deadline() (deadline time.Time, ok bool) - - // Done returns a channel that's closed when work done on behalf of this - // context should be canceled. Done may return nil if this context can - // never be canceled. Successive calls to Done return the same value. - // - // WithCancel arranges for Done to be closed when cancel is called; - // WithDeadline arranges for Done to be closed when the deadline - // expires; WithTimeout arranges for Done to be closed when the timeout - // elapses. - // - // Done is provided for use in select statements: - // - // // Stream generates values with DoSomething and sends them to out - // // until DoSomething returns an error or ctx.Done is closed. - // func Stream(ctx context.Context, out chan<- Value) error { - // for { - // v, err := DoSomething(ctx) - // if err != nil { - // return err - // } - // select { - // case <-ctx.Done(): - // return ctx.Err() - // case out <- v: - // } - // } - // } - // - // See http://blog.golang.org/pipelines for more examples of how to use - // a Done channel for cancelation. - Done() <-chan struct{} - - // Err returns a non-nil error value after Done is closed. Err returns - // Canceled if the context was canceled or DeadlineExceeded if the - // context's deadline passed. No other values for Err are defined. - // After Done is closed, successive calls to Err return the same value. - Err() error - - // Value returns the value associated with this context for key, or nil - // if no value is associated with key. Successive calls to Value with - // the same key returns the same result. - // - // Use context values only for request-scoped data that transits - // processes and API boundaries, not for passing optional parameters to - // functions. - // - // A key identifies a specific value in a Context. Functions that wish - // to store values in Context typically allocate a key in a global - // variable then use that key as the argument to context.WithValue and - // Context.Value. A key can be any type that supports equality; - // packages should define keys as an unexported type to avoid - // collisions. - // - // Packages that define a Context key should provide type-safe accessors - // for the values stores using that key: - // - // // Package user defines a User type that's stored in Contexts. - // package user - // - // import "golang.org/x/net/context" - // - // // User is the type of value stored in the Contexts. - // type User struct {...} - // - // // key is an unexported type for keys defined in this package. - // // This prevents collisions with keys defined in other packages. - // type key int - // - // // userKey is the key for user.User values in Contexts. 
It is - // // unexported; clients use user.NewContext and user.FromContext - // // instead of using this key directly. - // var userKey key = 0 - // - // // NewContext returns a new Context that carries value u. - // func NewContext(ctx context.Context, u *User) context.Context { - // return context.WithValue(ctx, userKey, u) - // } - // - // // FromContext returns the User value stored in ctx, if any. - // func FromContext(ctx context.Context) (*User, bool) { - // u, ok := ctx.Value(userKey).(*User) - // return u, ok - // } - Value(key interface{}) interface{} -} - // Background returns a non-nil, empty Context. It is never canceled, has no -// values, and has no deadline. It is typically used by the main function, +// values, and has no deadline. It is typically used by the main function, // initialization, and tests, and as the top-level Context for incoming // requests. func Background() Context { return background } -// TODO returns a non-nil, empty Context. Code should use context.TODO when +// TODO returns a non-nil, empty Context. Code should use context.TODO when // it's unclear which Context to use or it is not yet available (because the // surrounding function has not yet been extended to accept a Context // parameter). TODO is recognized by static analysis tools that determine @@ -149,8 +52,3 @@ func Background() Context { func TODO() Context { return todo } - -// A CancelFunc tells an operation to abandon its work. -// A CancelFunc does not wait for the work to stop. -// After the first call, subsequent calls to a CancelFunc do nothing. -type CancelFunc func() diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/context/go17.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/go17.go index f8cda19a..d20f52b7 100644 --- a/src/stackdriver-nozzle/vendor/golang.org/x/net/context/go17.go +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/go17.go @@ -35,8 +35,8 @@ func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { } // WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned // context's Done channel is closed when the deadline expires, when the returned // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/context/go19.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 00000000..d88bd1db --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. 
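// Illustrative sketch (hypothetical function names, not part of this diff):
// because Context and CancelFunc become type aliases under go1.9, a context
// built with the standard library can be passed to code written against
// golang.org/x/net/context without any conversion.
package contextalias_example

import (
	stdcontext "context"
	"time"

	netcontext "golang.org/x/net/context"
)

// waitOrCancel is written against the x/net/context type.
func waitOrCancel(ctx netcontext.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(10 * time.Millisecond):
		return nil
	}
}

func example() error {
	// A standard-library context satisfies the alias directly.
	ctx, cancel := stdcontext.WithTimeout(stdcontext.Background(), time.Second)
	defer cancel()
	return waitOrCancel(ctx)
}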
+type CancelFunc = context.CancelFunc diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/context/pre_go17.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/pre_go17.go index 5a30acab..0f35592d 100644 --- a/src/stackdriver-nozzle/vendor/golang.org/x/net/context/pre_go17.go +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/pre_go17.go @@ -13,7 +13,7 @@ import ( "time" ) -// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// An emptyCtx is never canceled, has no values, and has no deadline. It is not // struct{}, since vars of this type must have distinct addresses. type emptyCtx int @@ -104,7 +104,7 @@ func propagateCancel(parent Context, child canceler) { } // parentCancelCtx follows a chain of parent references until it finds a -// *cancelCtx. This function understands how each of the concrete types in this +// *cancelCtx. This function understands how each of the concrete types in this // package represents its parent. func parentCancelCtx(parent Context) (*cancelCtx, bool) { for { @@ -134,14 +134,14 @@ func removeChild(parent Context, child canceler) { p.mu.Unlock() } -// A canceler is a context type that can be canceled directly. The +// A canceler is a context type that can be canceled directly. The // implementations are *cancelCtx and *timerCtx. type canceler interface { cancel(removeFromParent bool, err error) Done() <-chan struct{} } -// A cancelCtx can be canceled. When canceled, it also cancels any children +// A cancelCtx can be canceled. When canceled, it also cancels any children // that implement canceler. type cancelCtx struct { Context @@ -193,8 +193,8 @@ func (c *cancelCtx) cancel(removeFromParent bool, err error) { } // WithDeadline returns a copy of the parent context with the deadline adjusted -// to be no later than d. If the parent's deadline is already earlier than d, -// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned // context's Done channel is closed when the deadline expires, when the returned // cancel function is called, or when the parent context's Done channel is // closed, whichever happens first. @@ -226,8 +226,8 @@ func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { return c, func() { c.cancel(true, Canceled) } } -// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to -// implement Done and Err. It implements cancel by stopping its timer then +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then // delegating to cancelCtx.cancel. type timerCtx struct { *cancelCtx @@ -281,7 +281,7 @@ func WithValue(parent Context, key interface{}, val interface{}) Context { return &valueCtx{parent, key, val} } -// A valueCtx carries a key-value pair. It implements Value for that key and +// A valueCtx carries a key-value pair. It implements Value for that key and // delegates all other calls to the embedded Context. type valueCtx struct { Context diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/context/pre_go19.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 00000000..b105f80b --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. + // After Done is closed, successive calls to Err return the same value. + Err() error + + // Value returns the value associated with this context for key, or nil + // if no value is associated with key. Successive calls to Value with + // the same key returns the same result. + // + // Use context values only for request-scoped data that transits + // processes and API boundaries, not for passing optional parameters to + // functions. + // + // A key identifies a specific value in a Context. Functions that wish + // to store values in Context typically allocate a key in a global + // variable then use that key as the argument to context.WithValue and + // Context.Value. A key can be any type that supports equality; + // packages should define keys as an unexported type to avoid + // collisions. + // + // Packages that define a Context key should provide type-safe accessors + // for the values stores using that key: + // + // // Package user defines a User type that's stored in Contexts. + // package user + // + // import "golang.org/x/net/context" + // + // // User is the type of value stored in the Contexts. + // type User struct {...} + // + // // key is an unexported type for keys defined in this package. + // // This prevents collisions with keys defined in other packages. + // type key int + // + // // userKey is the key for user.User values in Contexts. It is + // // unexported; clients use user.NewContext and user.FromContext + // // instead of using this key directly. + // var userKey key = 0 + // + // // NewContext returns a new Context that carries value u. 
+ // func NewContext(ctx context.Context, u *User) context.Context { + // return context.WithValue(ctx, userKey, u) + // } + // + // // FromContext returns the User value stored in ctx, if any. + // func FromContext(ctx context.Context) (*User, bool) { + // u, ok := ctx.Value(userKey).(*User) + // return u, ok + // } + Value(key interface{}) interface{} +} + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc func() diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/atom/atom.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/atom/atom.go new file mode 100644 index 00000000..cd0a8ac1 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/atom/atom.go @@ -0,0 +1,78 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package atom provides integer codes (also known as atoms) for a fixed set of +// frequently occurring HTML strings: tag names and attribute keys such as "p" +// and "id". +// +// Sharing an atom's name between all elements with the same tag can result in +// fewer string allocations when tokenizing and parsing HTML. Integer +// comparisons are also generally faster than string comparisons. +// +// The value of an atom's particular code is not guaranteed to stay the same +// between versions of this package. Neither is any ordering guaranteed: +// whether atom.H1 < atom.H2 may also change. The codes are not guaranteed to +// be dense. The only guarantees are that e.g. looking up "div" will yield +// atom.Div, calling atom.Div.String will return "div", and atom.Div != 0. +package atom // import "golang.org/x/net/html/atom" + +// Atom is an integer code for a string. The zero value maps to "". +type Atom uint32 + +// String returns the atom's name. +func (a Atom) String() string { + start := uint32(a >> 8) + n := uint32(a & 0xff) + if start+n > uint32(len(atomText)) { + return "" + } + return atomText[start : start+n] +} + +func (a Atom) string() string { + return atomText[a>>8 : a>>8+a&0xff] +} + +// fnv computes the FNV hash with an arbitrary starting value h. +func fnv(h uint32, s []byte) uint32 { + for i := range s { + h ^= uint32(s[i]) + h *= 16777619 + } + return h +} + +func match(s string, t []byte) bool { + for i, c := range t { + if s[i] != c { + return false + } + } + return true +} + +// Lookup returns the atom whose name is s. It returns zero if there is no +// such atom. The lookup is case sensitive. +func Lookup(s []byte) Atom { + if len(s) == 0 || len(s) > maxAtomLen { + return 0 + } + h := fnv(hash0, s) + if a := table[h&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + if a := table[(h>>16)&uint32(len(table)-1)]; int(a&0xff) == len(s) && match(a.string(), s) { + return a + } + return 0 +} + +// String returns a string whose contents are equal to s. In that sense, it is +// equivalent to string(s) but may be more efficient. 
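// Illustrative sketch (not part of this diff): typical use of the atom API
// added above. Lookup resolves known tag and attribute names to integer
// codes, and the package-level String helper falls back to an ordinary
// string conversion for names it does not know.
package atom_example

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func example() {
	// Known names map to atoms; the lookup is case sensitive.
	if a := atom.Lookup([]byte("div")); a != 0 {
		fmt.Println(a == atom.Div, a.String()) // true div
	}
	// Unknown names come back as plain strings.
	fmt.Println(atom.String([]byte("custom-tag"))) // custom-tag
}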
+func String(s []byte) string { + if a := Lookup(s); a != 0 { + return a.String() + } + return string(s) +} diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/atom/table.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/atom/table.go new file mode 100644 index 00000000..2605ba31 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/atom/table.go @@ -0,0 +1,713 @@ +// generated by go run gen.go; DO NOT EDIT + +package atom + +const ( + A Atom = 0x1 + Abbr Atom = 0x4 + Accept Atom = 0x2106 + AcceptCharset Atom = 0x210e + Accesskey Atom = 0x3309 + Action Atom = 0x1f606 + Address Atom = 0x4f307 + Align Atom = 0x1105 + Alt Atom = 0x4503 + Annotation Atom = 0x1670a + AnnotationXml Atom = 0x1670e + Applet Atom = 0x2b306 + Area Atom = 0x2fa04 + Article Atom = 0x38807 + Aside Atom = 0x8305 + Async Atom = 0x7b05 + Audio Atom = 0xa605 + Autocomplete Atom = 0x1fc0c + Autofocus Atom = 0xb309 + Autoplay Atom = 0xce08 + B Atom = 0x101 + Base Atom = 0xd604 + Basefont Atom = 0xd608 + Bdi Atom = 0x1a03 + Bdo Atom = 0xe703 + Bgsound Atom = 0x11807 + Big Atom = 0x12403 + Blink Atom = 0x12705 + Blockquote Atom = 0x12c0a + Body Atom = 0x2f04 + Br Atom = 0x202 + Button Atom = 0x13606 + Canvas Atom = 0x7f06 + Caption Atom = 0x1bb07 + Center Atom = 0x5b506 + Challenge Atom = 0x21f09 + Charset Atom = 0x2807 + Checked Atom = 0x32807 + Cite Atom = 0x3c804 + Class Atom = 0x4de05 + Code Atom = 0x14904 + Col Atom = 0x15003 + Colgroup Atom = 0x15008 + Color Atom = 0x15d05 + Cols Atom = 0x16204 + Colspan Atom = 0x16207 + Command Atom = 0x17507 + Content Atom = 0x42307 + Contenteditable Atom = 0x4230f + Contextmenu Atom = 0x3310b + Controls Atom = 0x18808 + Coords Atom = 0x19406 + Crossorigin Atom = 0x19f0b + Data Atom = 0x44a04 + Datalist Atom = 0x44a08 + Datetime Atom = 0x23c08 + Dd Atom = 0x26702 + Default Atom = 0x8607 + Defer Atom = 0x14b05 + Del Atom = 0x3ef03 + Desc Atom = 0x4db04 + Details Atom = 0x4807 + Dfn Atom = 0x6103 + Dialog Atom = 0x1b06 + Dir Atom = 0x6903 + Dirname Atom = 0x6907 + Disabled Atom = 0x10c08 + Div Atom = 0x11303 + Dl Atom = 0x11e02 + Download Atom = 0x40008 + Draggable Atom = 0x17b09 + Dropzone Atom = 0x39108 + Dt Atom = 0x50902 + Em Atom = 0x6502 + Embed Atom = 0x6505 + Enctype Atom = 0x21107 + Face Atom = 0x5b304 + Fieldset Atom = 0x1b008 + Figcaption Atom = 0x1b80a + Figure Atom = 0x1cc06 + Font Atom = 0xda04 + Footer Atom = 0x8d06 + For Atom = 0x1d803 + ForeignObject Atom = 0x1d80d + Foreignobject Atom = 0x1e50d + Form Atom = 0x1f204 + Formaction Atom = 0x1f20a + Formenctype Atom = 0x20d0b + Formmethod Atom = 0x2280a + Formnovalidate Atom = 0x2320e + Formtarget Atom = 0x2470a + Frame Atom = 0x9a05 + Frameset Atom = 0x9a08 + H1 Atom = 0x26e02 + H2 Atom = 0x29402 + H3 Atom = 0x2a702 + H4 Atom = 0x2e902 + H5 Atom = 0x2f302 + H6 Atom = 0x50b02 + Head Atom = 0x2d504 + Header Atom = 0x2d506 + Headers Atom = 0x2d507 + Height Atom = 0x25106 + Hgroup Atom = 0x25906 + Hidden Atom = 0x26506 + High Atom = 0x26b04 + Hr Atom = 0x27002 + Href Atom = 0x27004 + Hreflang Atom = 0x27008 + Html Atom = 0x25504 + HttpEquiv Atom = 0x2780a + I Atom = 0x601 + Icon Atom = 0x42204 + Id Atom = 0x8502 + Iframe Atom = 0x29606 + Image Atom = 0x29c05 + Img Atom = 0x2a103 + Input Atom = 0x3e805 + Inputmode Atom = 0x3e809 + Ins Atom = 0x1a803 + Isindex Atom = 0x2a907 + Ismap Atom = 0x2b005 + Itemid Atom = 0x33c06 + Itemprop Atom = 0x3c908 + Itemref Atom = 0x5ad07 + Itemscope Atom = 0x2b909 + Itemtype Atom = 0x2c308 + Kbd Atom = 0x1903 + Keygen Atom = 0x3906 + Keytype Atom = 0x53707 + 
Kind Atom = 0x10904 + Label Atom = 0xf005 + Lang Atom = 0x27404 + Legend Atom = 0x18206 + Li Atom = 0x1202 + Link Atom = 0x12804 + List Atom = 0x44e04 + Listing Atom = 0x44e07 + Loop Atom = 0xf404 + Low Atom = 0x11f03 + Malignmark Atom = 0x100a + Manifest Atom = 0x5f108 + Map Atom = 0x2b203 + Mark Atom = 0x1604 + Marquee Atom = 0x2cb07 + Math Atom = 0x2d204 + Max Atom = 0x2e103 + Maxlength Atom = 0x2e109 + Media Atom = 0x6e05 + Mediagroup Atom = 0x6e0a + Menu Atom = 0x33804 + Menuitem Atom = 0x33808 + Meta Atom = 0x45d04 + Meter Atom = 0x24205 + Method Atom = 0x22c06 + Mglyph Atom = 0x2a206 + Mi Atom = 0x2eb02 + Min Atom = 0x2eb03 + Minlength Atom = 0x2eb09 + Mn Atom = 0x23502 + Mo Atom = 0x3ed02 + Ms Atom = 0x2bc02 + Mtext Atom = 0x2f505 + Multiple Atom = 0x30308 + Muted Atom = 0x30b05 + Name Atom = 0x6c04 + Nav Atom = 0x3e03 + Nobr Atom = 0x5704 + Noembed Atom = 0x6307 + Noframes Atom = 0x9808 + Noscript Atom = 0x3d208 + Novalidate Atom = 0x2360a + Object Atom = 0x1ec06 + Ol Atom = 0xc902 + Onabort Atom = 0x13a07 + Onafterprint Atom = 0x1c00c + Onautocomplete Atom = 0x1fa0e + Onautocompleteerror Atom = 0x1fa13 + Onbeforeprint Atom = 0x6040d + Onbeforeunload Atom = 0x4e70e + Onblur Atom = 0xaa06 + Oncancel Atom = 0xe908 + Oncanplay Atom = 0x28509 + Oncanplaythrough Atom = 0x28510 + Onchange Atom = 0x3a708 + Onclick Atom = 0x31007 + Onclose Atom = 0x31707 + Oncontextmenu Atom = 0x32f0d + Oncuechange Atom = 0x3420b + Ondblclick Atom = 0x34d0a + Ondrag Atom = 0x35706 + Ondragend Atom = 0x35709 + Ondragenter Atom = 0x3600b + Ondragleave Atom = 0x36b0b + Ondragover Atom = 0x3760a + Ondragstart Atom = 0x3800b + Ondrop Atom = 0x38f06 + Ondurationchange Atom = 0x39f10 + Onemptied Atom = 0x39609 + Onended Atom = 0x3af07 + Onerror Atom = 0x3b607 + Onfocus Atom = 0x3bd07 + Onhashchange Atom = 0x3da0c + Oninput Atom = 0x3e607 + Oninvalid Atom = 0x3f209 + Onkeydown Atom = 0x3fb09 + Onkeypress Atom = 0x4080a + Onkeyup Atom = 0x41807 + Onlanguagechange Atom = 0x43210 + Onload Atom = 0x44206 + Onloadeddata Atom = 0x4420c + Onloadedmetadata Atom = 0x45510 + Onloadstart Atom = 0x46b0b + Onmessage Atom = 0x47609 + Onmousedown Atom = 0x47f0b + Onmousemove Atom = 0x48a0b + Onmouseout Atom = 0x4950a + Onmouseover Atom = 0x4a20b + Onmouseup Atom = 0x4ad09 + Onmousewheel Atom = 0x4b60c + Onoffline Atom = 0x4c209 + Ononline Atom = 0x4cb08 + Onpagehide Atom = 0x4d30a + Onpageshow Atom = 0x4fe0a + Onpause Atom = 0x50d07 + Onplay Atom = 0x51706 + Onplaying Atom = 0x51709 + Onpopstate Atom = 0x5200a + Onprogress Atom = 0x52a0a + Onratechange Atom = 0x53e0c + Onreset Atom = 0x54a07 + Onresize Atom = 0x55108 + Onscroll Atom = 0x55f08 + Onseeked Atom = 0x56708 + Onseeking Atom = 0x56f09 + Onselect Atom = 0x57808 + Onshow Atom = 0x58206 + Onsort Atom = 0x58b06 + Onstalled Atom = 0x59509 + Onstorage Atom = 0x59e09 + Onsubmit Atom = 0x5a708 + Onsuspend Atom = 0x5bb09 + Ontimeupdate Atom = 0xdb0c + Ontoggle Atom = 0x5c408 + Onunload Atom = 0x5cc08 + Onvolumechange Atom = 0x5d40e + Onwaiting Atom = 0x5e209 + Open Atom = 0x3cf04 + Optgroup Atom = 0xf608 + Optimum Atom = 0x5eb07 + Option Atom = 0x60006 + Output Atom = 0x49c06 + P Atom = 0xc01 + Param Atom = 0xc05 + Pattern Atom = 0x5107 + Ping Atom = 0x7704 + Placeholder Atom = 0xc30b + Plaintext Atom = 0xfd09 + Poster Atom = 0x15706 + Pre Atom = 0x25e03 + Preload Atom = 0x25e07 + Progress Atom = 0x52c08 + Prompt Atom = 0x5fa06 + Public Atom = 0x41e06 + Q Atom = 0x13101 + Radiogroup Atom = 0x30a + Readonly Atom = 0x2fb08 + Rel Atom = 0x25f03 + Required Atom = 0x1d008 + 
Reversed Atom = 0x5a08 + Rows Atom = 0x9204 + Rowspan Atom = 0x9207 + Rp Atom = 0x1c602 + Rt Atom = 0x13f02 + Ruby Atom = 0xaf04 + S Atom = 0x2c01 + Samp Atom = 0x4e04 + Sandbox Atom = 0xbb07 + Scope Atom = 0x2bd05 + Scoped Atom = 0x2bd06 + Script Atom = 0x3d406 + Seamless Atom = 0x31c08 + Section Atom = 0x4e207 + Select Atom = 0x57a06 + Selected Atom = 0x57a08 + Shape Atom = 0x4f905 + Size Atom = 0x55504 + Sizes Atom = 0x55505 + Small Atom = 0x18f05 + Sortable Atom = 0x58d08 + Sorted Atom = 0x19906 + Source Atom = 0x1aa06 + Spacer Atom = 0x2db06 + Span Atom = 0x9504 + Spellcheck Atom = 0x3230a + Src Atom = 0x3c303 + Srcdoc Atom = 0x3c306 + Srclang Atom = 0x41107 + Start Atom = 0x38605 + Step Atom = 0x5f704 + Strike Atom = 0x53306 + Strong Atom = 0x55906 + Style Atom = 0x61105 + Sub Atom = 0x5a903 + Summary Atom = 0x61607 + Sup Atom = 0x61d03 + Svg Atom = 0x62003 + System Atom = 0x62306 + Tabindex Atom = 0x46308 + Table Atom = 0x42d05 + Target Atom = 0x24b06 + Tbody Atom = 0x2e05 + Td Atom = 0x4702 + Template Atom = 0x62608 + Textarea Atom = 0x2f608 + Tfoot Atom = 0x8c05 + Th Atom = 0x22e02 + Thead Atom = 0x2d405 + Time Atom = 0xdd04 + Title Atom = 0xa105 + Tr Atom = 0x10502 + Track Atom = 0x10505 + Translate Atom = 0x14009 + Tt Atom = 0x5302 + Type Atom = 0x21404 + Typemustmatch Atom = 0x2140d + U Atom = 0xb01 + Ul Atom = 0x8a02 + Usemap Atom = 0x51106 + Value Atom = 0x4005 + Var Atom = 0x11503 + Video Atom = 0x28105 + Wbr Atom = 0x12103 + Width Atom = 0x50705 + Wrap Atom = 0x58704 + Xmp Atom = 0xc103 +) + +const hash0 = 0xc17da63e + +const maxAtomLen = 19 + +var table = [1 << 9]Atom{ + 0x1: 0x48a0b, // onmousemove + 0x2: 0x5e209, // onwaiting + 0x3: 0x1fa13, // onautocompleteerror + 0x4: 0x5fa06, // prompt + 0x7: 0x5eb07, // optimum + 0x8: 0x1604, // mark + 0xa: 0x5ad07, // itemref + 0xb: 0x4fe0a, // onpageshow + 0xc: 0x57a06, // select + 0xd: 0x17b09, // draggable + 0xe: 0x3e03, // nav + 0xf: 0x17507, // command + 0x11: 0xb01, // u + 0x14: 0x2d507, // headers + 0x15: 0x44a08, // datalist + 0x17: 0x4e04, // samp + 0x1a: 0x3fb09, // onkeydown + 0x1b: 0x55f08, // onscroll + 0x1c: 0x15003, // col + 0x20: 0x3c908, // itemprop + 0x21: 0x2780a, // http-equiv + 0x22: 0x61d03, // sup + 0x24: 0x1d008, // required + 0x2b: 0x25e07, // preload + 0x2c: 0x6040d, // onbeforeprint + 0x2d: 0x3600b, // ondragenter + 0x2e: 0x50902, // dt + 0x2f: 0x5a708, // onsubmit + 0x30: 0x27002, // hr + 0x31: 0x32f0d, // oncontextmenu + 0x33: 0x29c05, // image + 0x34: 0x50d07, // onpause + 0x35: 0x25906, // hgroup + 0x36: 0x7704, // ping + 0x37: 0x57808, // onselect + 0x3a: 0x11303, // div + 0x3b: 0x1fa0e, // onautocomplete + 0x40: 0x2eb02, // mi + 0x41: 0x31c08, // seamless + 0x42: 0x2807, // charset + 0x43: 0x8502, // id + 0x44: 0x5200a, // onpopstate + 0x45: 0x3ef03, // del + 0x46: 0x2cb07, // marquee + 0x47: 0x3309, // accesskey + 0x49: 0x8d06, // footer + 0x4a: 0x44e04, // list + 0x4b: 0x2b005, // ismap + 0x51: 0x33804, // menu + 0x52: 0x2f04, // body + 0x55: 0x9a08, // frameset + 0x56: 0x54a07, // onreset + 0x57: 0x12705, // blink + 0x58: 0xa105, // title + 0x59: 0x38807, // article + 0x5b: 0x22e02, // th + 0x5d: 0x13101, // q + 0x5e: 0x3cf04, // open + 0x5f: 0x2fa04, // area + 0x61: 0x44206, // onload + 0x62: 0xda04, // font + 0x63: 0xd604, // base + 0x64: 0x16207, // colspan + 0x65: 0x53707, // keytype + 0x66: 0x11e02, // dl + 0x68: 0x1b008, // fieldset + 0x6a: 0x2eb03, // min + 0x6b: 0x11503, // var + 0x6f: 0x2d506, // header + 0x70: 0x13f02, // rt + 0x71: 0x15008, // colgroup + 0x72: 0x23502, // mn + 0x74: 
0x13a07, // onabort + 0x75: 0x3906, // keygen + 0x76: 0x4c209, // onoffline + 0x77: 0x21f09, // challenge + 0x78: 0x2b203, // map + 0x7a: 0x2e902, // h4 + 0x7b: 0x3b607, // onerror + 0x7c: 0x2e109, // maxlength + 0x7d: 0x2f505, // mtext + 0x7e: 0xbb07, // sandbox + 0x7f: 0x58b06, // onsort + 0x80: 0x100a, // malignmark + 0x81: 0x45d04, // meta + 0x82: 0x7b05, // async + 0x83: 0x2a702, // h3 + 0x84: 0x26702, // dd + 0x85: 0x27004, // href + 0x86: 0x6e0a, // mediagroup + 0x87: 0x19406, // coords + 0x88: 0x41107, // srclang + 0x89: 0x34d0a, // ondblclick + 0x8a: 0x4005, // value + 0x8c: 0xe908, // oncancel + 0x8e: 0x3230a, // spellcheck + 0x8f: 0x9a05, // frame + 0x91: 0x12403, // big + 0x94: 0x1f606, // action + 0x95: 0x6903, // dir + 0x97: 0x2fb08, // readonly + 0x99: 0x42d05, // table + 0x9a: 0x61607, // summary + 0x9b: 0x12103, // wbr + 0x9c: 0x30a, // radiogroup + 0x9d: 0x6c04, // name + 0x9f: 0x62306, // system + 0xa1: 0x15d05, // color + 0xa2: 0x7f06, // canvas + 0xa3: 0x25504, // html + 0xa5: 0x56f09, // onseeking + 0xac: 0x4f905, // shape + 0xad: 0x25f03, // rel + 0xae: 0x28510, // oncanplaythrough + 0xaf: 0x3760a, // ondragover + 0xb0: 0x62608, // template + 0xb1: 0x1d80d, // foreignObject + 0xb3: 0x9204, // rows + 0xb6: 0x44e07, // listing + 0xb7: 0x49c06, // output + 0xb9: 0x3310b, // contextmenu + 0xbb: 0x11f03, // low + 0xbc: 0x1c602, // rp + 0xbd: 0x5bb09, // onsuspend + 0xbe: 0x13606, // button + 0xbf: 0x4db04, // desc + 0xc1: 0x4e207, // section + 0xc2: 0x52a0a, // onprogress + 0xc3: 0x59e09, // onstorage + 0xc4: 0x2d204, // math + 0xc5: 0x4503, // alt + 0xc7: 0x8a02, // ul + 0xc8: 0x5107, // pattern + 0xc9: 0x4b60c, // onmousewheel + 0xca: 0x35709, // ondragend + 0xcb: 0xaf04, // ruby + 0xcc: 0xc01, // p + 0xcd: 0x31707, // onclose + 0xce: 0x24205, // meter + 0xcf: 0x11807, // bgsound + 0xd2: 0x25106, // height + 0xd4: 0x101, // b + 0xd5: 0x2c308, // itemtype + 0xd8: 0x1bb07, // caption + 0xd9: 0x10c08, // disabled + 0xdb: 0x33808, // menuitem + 0xdc: 0x62003, // svg + 0xdd: 0x18f05, // small + 0xde: 0x44a04, // data + 0xe0: 0x4cb08, // ononline + 0xe1: 0x2a206, // mglyph + 0xe3: 0x6505, // embed + 0xe4: 0x10502, // tr + 0xe5: 0x46b0b, // onloadstart + 0xe7: 0x3c306, // srcdoc + 0xeb: 0x5c408, // ontoggle + 0xed: 0xe703, // bdo + 0xee: 0x4702, // td + 0xef: 0x8305, // aside + 0xf0: 0x29402, // h2 + 0xf1: 0x52c08, // progress + 0xf2: 0x12c0a, // blockquote + 0xf4: 0xf005, // label + 0xf5: 0x601, // i + 0xf7: 0x9207, // rowspan + 0xfb: 0x51709, // onplaying + 0xfd: 0x2a103, // img + 0xfe: 0xf608, // optgroup + 0xff: 0x42307, // content + 0x101: 0x53e0c, // onratechange + 0x103: 0x3da0c, // onhashchange + 0x104: 0x4807, // details + 0x106: 0x40008, // download + 0x109: 0x14009, // translate + 0x10b: 0x4230f, // contenteditable + 0x10d: 0x36b0b, // ondragleave + 0x10e: 0x2106, // accept + 0x10f: 0x57a08, // selected + 0x112: 0x1f20a, // formaction + 0x113: 0x5b506, // center + 0x115: 0x45510, // onloadedmetadata + 0x116: 0x12804, // link + 0x117: 0xdd04, // time + 0x118: 0x19f0b, // crossorigin + 0x119: 0x3bd07, // onfocus + 0x11a: 0x58704, // wrap + 0x11b: 0x42204, // icon + 0x11d: 0x28105, // video + 0x11e: 0x4de05, // class + 0x121: 0x5d40e, // onvolumechange + 0x122: 0xaa06, // onblur + 0x123: 0x2b909, // itemscope + 0x124: 0x61105, // style + 0x127: 0x41e06, // public + 0x129: 0x2320e, // formnovalidate + 0x12a: 0x58206, // onshow + 0x12c: 0x51706, // onplay + 0x12d: 0x3c804, // cite + 0x12e: 0x2bc02, // ms + 0x12f: 0xdb0c, // ontimeupdate + 0x130: 0x10904, // kind + 0x131: 
0x2470a, // formtarget + 0x135: 0x3af07, // onended + 0x136: 0x26506, // hidden + 0x137: 0x2c01, // s + 0x139: 0x2280a, // formmethod + 0x13a: 0x3e805, // input + 0x13c: 0x50b02, // h6 + 0x13d: 0xc902, // ol + 0x13e: 0x3420b, // oncuechange + 0x13f: 0x1e50d, // foreignobject + 0x143: 0x4e70e, // onbeforeunload + 0x144: 0x2bd05, // scope + 0x145: 0x39609, // onemptied + 0x146: 0x14b05, // defer + 0x147: 0xc103, // xmp + 0x148: 0x39f10, // ondurationchange + 0x149: 0x1903, // kbd + 0x14c: 0x47609, // onmessage + 0x14d: 0x60006, // option + 0x14e: 0x2eb09, // minlength + 0x14f: 0x32807, // checked + 0x150: 0xce08, // autoplay + 0x152: 0x202, // br + 0x153: 0x2360a, // novalidate + 0x156: 0x6307, // noembed + 0x159: 0x31007, // onclick + 0x15a: 0x47f0b, // onmousedown + 0x15b: 0x3a708, // onchange + 0x15e: 0x3f209, // oninvalid + 0x15f: 0x2bd06, // scoped + 0x160: 0x18808, // controls + 0x161: 0x30b05, // muted + 0x162: 0x58d08, // sortable + 0x163: 0x51106, // usemap + 0x164: 0x1b80a, // figcaption + 0x165: 0x35706, // ondrag + 0x166: 0x26b04, // high + 0x168: 0x3c303, // src + 0x169: 0x15706, // poster + 0x16b: 0x1670e, // annotation-xml + 0x16c: 0x5f704, // step + 0x16d: 0x4, // abbr + 0x16e: 0x1b06, // dialog + 0x170: 0x1202, // li + 0x172: 0x3ed02, // mo + 0x175: 0x1d803, // for + 0x176: 0x1a803, // ins + 0x178: 0x55504, // size + 0x179: 0x43210, // onlanguagechange + 0x17a: 0x8607, // default + 0x17b: 0x1a03, // bdi + 0x17c: 0x4d30a, // onpagehide + 0x17d: 0x6907, // dirname + 0x17e: 0x21404, // type + 0x17f: 0x1f204, // form + 0x181: 0x28509, // oncanplay + 0x182: 0x6103, // dfn + 0x183: 0x46308, // tabindex + 0x186: 0x6502, // em + 0x187: 0x27404, // lang + 0x189: 0x39108, // dropzone + 0x18a: 0x4080a, // onkeypress + 0x18b: 0x23c08, // datetime + 0x18c: 0x16204, // cols + 0x18d: 0x1, // a + 0x18e: 0x4420c, // onloadeddata + 0x190: 0xa605, // audio + 0x192: 0x2e05, // tbody + 0x193: 0x22c06, // method + 0x195: 0xf404, // loop + 0x196: 0x29606, // iframe + 0x198: 0x2d504, // head + 0x19e: 0x5f108, // manifest + 0x19f: 0xb309, // autofocus + 0x1a0: 0x14904, // code + 0x1a1: 0x55906, // strong + 0x1a2: 0x30308, // multiple + 0x1a3: 0xc05, // param + 0x1a6: 0x21107, // enctype + 0x1a7: 0x5b304, // face + 0x1a8: 0xfd09, // plaintext + 0x1a9: 0x26e02, // h1 + 0x1aa: 0x59509, // onstalled + 0x1ad: 0x3d406, // script + 0x1ae: 0x2db06, // spacer + 0x1af: 0x55108, // onresize + 0x1b0: 0x4a20b, // onmouseover + 0x1b1: 0x5cc08, // onunload + 0x1b2: 0x56708, // onseeked + 0x1b4: 0x2140d, // typemustmatch + 0x1b5: 0x1cc06, // figure + 0x1b6: 0x4950a, // onmouseout + 0x1b7: 0x25e03, // pre + 0x1b8: 0x50705, // width + 0x1b9: 0x19906, // sorted + 0x1bb: 0x5704, // nobr + 0x1be: 0x5302, // tt + 0x1bf: 0x1105, // align + 0x1c0: 0x3e607, // oninput + 0x1c3: 0x41807, // onkeyup + 0x1c6: 0x1c00c, // onafterprint + 0x1c7: 0x210e, // accept-charset + 0x1c8: 0x33c06, // itemid + 0x1c9: 0x3e809, // inputmode + 0x1cb: 0x53306, // strike + 0x1cc: 0x5a903, // sub + 0x1cd: 0x10505, // track + 0x1ce: 0x38605, // start + 0x1d0: 0xd608, // basefont + 0x1d6: 0x1aa06, // source + 0x1d7: 0x18206, // legend + 0x1d8: 0x2d405, // thead + 0x1da: 0x8c05, // tfoot + 0x1dd: 0x1ec06, // object + 0x1de: 0x6e05, // media + 0x1df: 0x1670a, // annotation + 0x1e0: 0x20d0b, // formenctype + 0x1e2: 0x3d208, // noscript + 0x1e4: 0x55505, // sizes + 0x1e5: 0x1fc0c, // autocomplete + 0x1e6: 0x9504, // span + 0x1e7: 0x9808, // noframes + 0x1e8: 0x24b06, // target + 0x1e9: 0x38f06, // ondrop + 0x1ea: 0x2b306, // applet + 0x1ec: 0x5a08, // 
reversed + 0x1f0: 0x2a907, // isindex + 0x1f3: 0x27008, // hreflang + 0x1f5: 0x2f302, // h5 + 0x1f6: 0x4f307, // address + 0x1fa: 0x2e103, // max + 0x1fb: 0xc30b, // placeholder + 0x1fc: 0x2f608, // textarea + 0x1fe: 0x4ad09, // onmouseup + 0x1ff: 0x3800b, // ondragstart +} + +const atomText = "abbradiogrouparamalignmarkbdialogaccept-charsetbodyaccesskey" + + "genavaluealtdetailsampatternobreversedfnoembedirnamediagroup" + + "ingasyncanvasidefaultfooterowspanoframesetitleaudionblurubya" + + "utofocusandboxmplaceholderautoplaybasefontimeupdatebdoncance" + + "labelooptgrouplaintextrackindisabledivarbgsoundlowbrbigblink" + + "blockquotebuttonabortranslatecodefercolgroupostercolorcolspa" + + "nnotation-xmlcommandraggablegendcontrolsmallcoordsortedcross" + + "originsourcefieldsetfigcaptionafterprintfigurequiredforeignO" + + "bjectforeignobjectformactionautocompleteerrorformenctypemust" + + "matchallengeformmethodformnovalidatetimeterformtargetheightm" + + "lhgroupreloadhiddenhigh1hreflanghttp-equivideoncanplaythroug" + + "h2iframeimageimglyph3isindexismappletitemscopeditemtypemarqu" + + "eematheaderspacermaxlength4minlength5mtextareadonlymultiplem" + + "utedonclickoncloseamlesspellcheckedoncontextmenuitemidoncuec" + + "hangeondblclickondragendondragenterondragleaveondragoverondr" + + "agstarticleondropzonemptiedondurationchangeonendedonerroronf" + + "ocusrcdocitempropenoscriptonhashchangeoninputmodeloninvalido" + + "nkeydownloadonkeypressrclangonkeyupublicontenteditableonlang" + + "uagechangeonloadeddatalistingonloadedmetadatabindexonloadsta" + + "rtonmessageonmousedownonmousemoveonmouseoutputonmouseoveronm" + + "ouseuponmousewheelonofflineononlineonpagehidesclassectionbef" + + "oreunloaddresshapeonpageshowidth6onpausemaponplayingonpopsta" + + "teonprogresstrikeytypeonratechangeonresetonresizestrongonscr" + + "ollonseekedonseekingonselectedonshowraponsortableonstalledon" + + "storageonsubmitemrefacenteronsuspendontoggleonunloadonvolume" + + "changeonwaitingoptimumanifestepromptoptionbeforeprintstylesu" + + "mmarysupsvgsystemplate" diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/charset/charset.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/charset/charset.go new file mode 100644 index 00000000..13bed159 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/charset/charset.go @@ -0,0 +1,257 @@ +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package charset provides common text encodings for HTML documents. +// +// The mapping from encoding labels to encodings is defined at +// https://encoding.spec.whatwg.org/. +package charset // import "golang.org/x/net/html/charset" + +import ( + "bytes" + "fmt" + "io" + "mime" + "strings" + "unicode/utf8" + + "golang.org/x/net/html" + "golang.org/x/text/encoding" + "golang.org/x/text/encoding/charmap" + "golang.org/x/text/encoding/htmlindex" + "golang.org/x/text/transform" +) + +// Lookup returns the encoding with the specified label, and its canonical +// name. It returns nil and the empty string if label is not one of the +// standard encodings for HTML. Matching is case-insensitive and ignores +// leading and trailing whitespace. Encoders will use HTML escape sequences for +// runes that are not supported by the character set. 
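// Illustrative sketch (not part of this diff): Lookup resolves WHATWG
// encoding labels, so legacy label spellings map to their canonical
// encodings, while unknown labels yield nil.
package charsetlookup_example

import (
	"fmt"

	"golang.org/x/net/html/charset"
)

func example() {
	// "latin1" is a WHATWG label for the windows-1252 encoding.
	if enc, name := charset.Lookup("latin1"); enc != nil {
		fmt.Println(name) // windows-1252
	}
	if enc, _ := charset.Lookup("no-such-charset"); enc == nil {
		fmt.Println("unknown label")
	}
}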
+func Lookup(label string) (e encoding.Encoding, name string) { + e, err := htmlindex.Get(label) + if err != nil { + return nil, "" + } + name, _ = htmlindex.Name(e) + return &htmlEncoding{e}, name +} + +type htmlEncoding struct{ encoding.Encoding } + +func (h *htmlEncoding) NewEncoder() *encoding.Encoder { + // HTML requires a non-terminating legacy encoder. We use HTML escapes to + // substitute unsupported code points. + return encoding.HTMLEscapeUnsupported(h.Encoding.NewEncoder()) +} + +// DetermineEncoding determines the encoding of an HTML document by examining +// up to the first 1024 bytes of content and the declared Content-Type. +// +// See http://www.whatwg.org/specs/web-apps/current-work/multipage/parsing.html#determining-the-character-encoding +func DetermineEncoding(content []byte, contentType string) (e encoding.Encoding, name string, certain bool) { + if len(content) > 1024 { + content = content[:1024] + } + + for _, b := range boms { + if bytes.HasPrefix(content, b.bom) { + e, name = Lookup(b.enc) + return e, name, true + } + } + + if _, params, err := mime.ParseMediaType(contentType); err == nil { + if cs, ok := params["charset"]; ok { + if e, name = Lookup(cs); e != nil { + return e, name, true + } + } + } + + if len(content) > 0 { + e, name = prescan(content) + if e != nil { + return e, name, false + } + } + + // Try to detect UTF-8. + // First eliminate any partial rune at the end. + for i := len(content) - 1; i >= 0 && i > len(content)-4; i-- { + b := content[i] + if b < 0x80 { + break + } + if utf8.RuneStart(b) { + content = content[:i] + break + } + } + hasHighBit := false + for _, c := range content { + if c >= 0x80 { + hasHighBit = true + break + } + } + if hasHighBit && utf8.Valid(content) { + return encoding.Nop, "utf-8", false + } + + // TODO: change default depending on user's locale? + return charmap.Windows1252, "windows-1252", false +} + +// NewReader returns an io.Reader that converts the content of r to UTF-8. +// It calls DetermineEncoding to find out what r's encoding is. +func NewReader(r io.Reader, contentType string) (io.Reader, error) { + preview := make([]byte, 1024) + n, err := io.ReadFull(r, preview) + switch { + case err == io.ErrUnexpectedEOF: + preview = preview[:n] + r = bytes.NewReader(preview) + case err != nil: + return nil, err + default: + r = io.MultiReader(bytes.NewReader(preview), r) + } + + if e, _, _ := DetermineEncoding(preview, contentType); e != encoding.Nop { + r = transform.NewReader(r, e.NewDecoder()) + } + return r, nil +} + +// NewReaderLabel returns a reader that converts from the specified charset to +// UTF-8. It uses Lookup to find the encoding that corresponds to label, and +// returns an error if Lookup returns nil. It is suitable for use as +// encoding/xml.Decoder's CharsetReader function. 
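The usual entry point for callers is NewReader above, which wraps an arbitrary reader in a decoder chosen by DetermineEncoding. A minimal sketch, assuming the golang.org/x/net/html/charset import path from the diff header (the URL is a placeholder):

	package main

	import (
		"io"
		"log"
		"net/http"
		"os"

		"golang.org/x/net/html/charset"
	)

	func main() {
		resp, err := http.Get("https://example.com/") // placeholder URL
		if err != nil {
			log.Fatal(err)
		}
		defer resp.Body.Close()

		// NewReader previews up to 1024 bytes (BOM, Content-Type charset
		// parameter, <meta> prescan) and returns a reader that yields UTF-8.
		body, err := charset.NewReader(resp.Body, resp.Header.Get("Content-Type"))
		if err != nil {
			log.Fatal(err)
		}
		if _, err := io.Copy(os.Stdout, body); err != nil {
			log.Fatal(err)
		}
	}

NewReaderLabel, documented above and defined next, has the same shape as encoding/xml's Decoder.CharsetReader hook, so it can be assigned directly, e.g. dec.CharsetReader = charset.NewReaderLabel.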
+func NewReaderLabel(label string, input io.Reader) (io.Reader, error) { + e, _ := Lookup(label) + if e == nil { + return nil, fmt.Errorf("unsupported charset: %q", label) + } + return transform.NewReader(input, e.NewDecoder()), nil +} + +func prescan(content []byte) (e encoding.Encoding, name string) { + z := html.NewTokenizer(bytes.NewReader(content)) + for { + switch z.Next() { + case html.ErrorToken: + return nil, "" + + case html.StartTagToken, html.SelfClosingTagToken: + tagName, hasAttr := z.TagName() + if !bytes.Equal(tagName, []byte("meta")) { + continue + } + attrList := make(map[string]bool) + gotPragma := false + + const ( + dontKnow = iota + doNeedPragma + doNotNeedPragma + ) + needPragma := dontKnow + + name = "" + e = nil + for hasAttr { + var key, val []byte + key, val, hasAttr = z.TagAttr() + ks := string(key) + if attrList[ks] { + continue + } + attrList[ks] = true + for i, c := range val { + if 'A' <= c && c <= 'Z' { + val[i] = c + 0x20 + } + } + + switch ks { + case "http-equiv": + if bytes.Equal(val, []byte("content-type")) { + gotPragma = true + } + + case "content": + if e == nil { + name = fromMetaElement(string(val)) + if name != "" { + e, name = Lookup(name) + if e != nil { + needPragma = doNeedPragma + } + } + } + + case "charset": + e, name = Lookup(string(val)) + needPragma = doNotNeedPragma + } + } + + if needPragma == dontKnow || needPragma == doNeedPragma && !gotPragma { + continue + } + + if strings.HasPrefix(name, "utf-16") { + name = "utf-8" + e = encoding.Nop + } + + if e != nil { + return e, name + } + } + } +} + +func fromMetaElement(s string) string { + for s != "" { + csLoc := strings.Index(s, "charset") + if csLoc == -1 { + return "" + } + s = s[csLoc+len("charset"):] + s = strings.TrimLeft(s, " \t\n\f\r") + if !strings.HasPrefix(s, "=") { + continue + } + s = s[1:] + s = strings.TrimLeft(s, " \t\n\f\r") + if s == "" { + return "" + } + if q := s[0]; q == '"' || q == '\'' { + s = s[1:] + closeQuote := strings.IndexRune(s, rune(q)) + if closeQuote == -1 { + return "" + } + return s[:closeQuote] + } + + end := strings.IndexAny(s, "; \t\n\f\r") + if end == -1 { + end = len(s) + } + return s[:end] + } + return "" +} + +var boms = []struct { + bom []byte + enc string +}{ + {[]byte{0xfe, 0xff}, "utf-16be"}, + {[]byte{0xff, 0xfe}, "utf-16le"}, + {[]byte{0xef, 0xbb, 0xbf}, "utf-8"}, +} diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/const.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/const.go new file mode 100644 index 00000000..52f651ff --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/const.go @@ -0,0 +1,102 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// Section 12.2.3.2 of the HTML5 specification says "The following elements +// have varying levels of special parsing rules". 
+// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements +var isSpecialElementMap = map[string]bool{ + "address": true, + "applet": true, + "area": true, + "article": true, + "aside": true, + "base": true, + "basefont": true, + "bgsound": true, + "blockquote": true, + "body": true, + "br": true, + "button": true, + "caption": true, + "center": true, + "col": true, + "colgroup": true, + "dd": true, + "details": true, + "dir": true, + "div": true, + "dl": true, + "dt": true, + "embed": true, + "fieldset": true, + "figcaption": true, + "figure": true, + "footer": true, + "form": true, + "frame": true, + "frameset": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "header": true, + "hgroup": true, + "hr": true, + "html": true, + "iframe": true, + "img": true, + "input": true, + "isindex": true, + "li": true, + "link": true, + "listing": true, + "marquee": true, + "menu": true, + "meta": true, + "nav": true, + "noembed": true, + "noframes": true, + "noscript": true, + "object": true, + "ol": true, + "p": true, + "param": true, + "plaintext": true, + "pre": true, + "script": true, + "section": true, + "select": true, + "source": true, + "style": true, + "summary": true, + "table": true, + "tbody": true, + "td": true, + "template": true, + "textarea": true, + "tfoot": true, + "th": true, + "thead": true, + "title": true, + "tr": true, + "track": true, + "ul": true, + "wbr": true, + "xmp": true, +} + +func isSpecialElement(element *Node) bool { + switch element.Namespace { + case "", "html": + return isSpecialElementMap[element.Data] + case "svg": + return element.Data == "foreignObject" + } + return false +} diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/doc.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/doc.go new file mode 100644 index 00000000..94f49687 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/doc.go @@ -0,0 +1,106 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package html implements an HTML5-compliant tokenizer and parser. + +Tokenization is done by creating a Tokenizer for an io.Reader r. It is the +caller's responsibility to ensure that r provides UTF-8 encoded HTML. + + z := html.NewTokenizer(r) + +Given a Tokenizer z, the HTML is tokenized by repeatedly calling z.Next(), +which parses the next token and returns its type, or an error: + + for { + tt := z.Next() + if tt == html.ErrorToken { + // ... + return ... + } + // Process the current token. + } + +There are two APIs for retrieving the current token. The high-level API is to +call Token; the low-level API is to call Text or TagName / TagAttr. Both APIs +allow optionally calling Raw after Next but before Token, Text, TagName, or +TagAttr. In EBNF notation, the valid call sequence per token is: + + Next {Raw} [ Token | Text | TagName {TagAttr} ] + +Token returns an independent data structure that completely describes a token. +Entities (such as "<") are unescaped, tag names and attribute keys are +lower-cased, and attributes are collected into a []Attribute. For example: + + for { + if z.Next() == html.ErrorToken { + // Returning io.EOF indicates success. 
+ return z.Err() + } + emitToken(z.Token()) + } + +The low-level API performs fewer allocations and copies, but the contents of +the []byte values returned by Text, TagName and TagAttr may change on the next +call to Next. For example, to extract an HTML page's anchor text: + + depth := 0 + for { + tt := z.Next() + switch tt { + case ErrorToken: + return z.Err() + case TextToken: + if depth > 0 { + // emitBytes should copy the []byte it receives, + // if it doesn't process it immediately. + emitBytes(z.Text()) + } + case StartTagToken, EndTagToken: + tn, _ := z.TagName() + if len(tn) == 1 && tn[0] == 'a' { + if tt == StartTagToken { + depth++ + } else { + depth-- + } + } + } + } + +Parsing is done by calling Parse with an io.Reader, which returns the root of +the parse tree (the document element) as a *Node. It is the caller's +responsibility to ensure that the Reader provides UTF-8 encoded HTML. For +example, to process each anchor node in depth-first order: + + doc, err := html.Parse(r) + if err != nil { + // ... + } + var f func(*html.Node) + f = func(n *html.Node) { + if n.Type == html.ElementNode && n.Data == "a" { + // Do something with n... + } + for c := n.FirstChild; c != nil; c = c.NextSibling { + f(c) + } + } + f(doc) + +The relevant specifications include: +https://html.spec.whatwg.org/multipage/syntax.html and +https://html.spec.whatwg.org/multipage/syntax.html#tokenization +*/ +package html // import "golang.org/x/net/html" + +// The tokenization algorithm implemented by this package is not a line-by-line +// transliteration of the relatively verbose state-machine in the WHATWG +// specification. A more direct approach is used instead, where the program +// counter implies the state, such as whether it is tokenizing a tag or a text +// node. Specification compliance is verified by checking expected and actual +// outputs over a test suite rather than aiming for algorithmic fidelity. + +// TODO(nigeltao): Does a DOM API belong in this package or a separate one? +// TODO(nigeltao): How does parsing interact with a JavaScript engine? diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/doctype.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/doctype.go new file mode 100644 index 00000000..c484e5a9 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/doctype.go @@ -0,0 +1,156 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "strings" +) + +// parseDoctype parses the data from a DoctypeToken into a name, +// public identifier, and system identifier. It returns a Node whose Type +// is DoctypeNode, whose Data is the name, and which has attributes +// named "system" and "public" for the two identifiers if they were present. +// quirks is whether the document should be parsed in "quirks mode". +func parseDoctype(s string) (n *Node, quirks bool) { + n = &Node{Type: DoctypeNode} + + // Find the name. + space := strings.IndexAny(s, whitespace) + if space == -1 { + space = len(s) + } + n.Data = s[:space] + // The comparison to "html" is case-sensitive. + if n.Data != "html" { + quirks = true + } + n.Data = strings.ToLower(n.Data) + s = strings.TrimLeft(s[space:], whitespace) + + if len(s) < 6 { + // It can't start with "PUBLIC" or "SYSTEM". + // Ignore the rest of the string. 
+ return n, quirks || s != "" + } + + key := strings.ToLower(s[:6]) + s = s[6:] + for key == "public" || key == "system" { + s = strings.TrimLeft(s, whitespace) + if s == "" { + break + } + quote := s[0] + if quote != '"' && quote != '\'' { + break + } + s = s[1:] + q := strings.IndexRune(s, rune(quote)) + var id string + if q == -1 { + id = s + s = "" + } else { + id = s[:q] + s = s[q+1:] + } + n.Attr = append(n.Attr, Attribute{Key: key, Val: id}) + if key == "public" { + key = "system" + } else { + key = "" + } + } + + if key != "" || s != "" { + quirks = true + } else if len(n.Attr) > 0 { + if n.Attr[0].Key == "public" { + public := strings.ToLower(n.Attr[0].Val) + switch public { + case "-//w3o//dtd w3 html strict 3.0//en//", "-/w3d/dtd html 4.0 transitional/en", "html": + quirks = true + default: + for _, q := range quirkyIDs { + if strings.HasPrefix(public, q) { + quirks = true + break + } + } + } + // The following two public IDs only cause quirks mode if there is no system ID. + if len(n.Attr) == 1 && (strings.HasPrefix(public, "-//w3c//dtd html 4.01 frameset//") || + strings.HasPrefix(public, "-//w3c//dtd html 4.01 transitional//")) { + quirks = true + } + } + if lastAttr := n.Attr[len(n.Attr)-1]; lastAttr.Key == "system" && + strings.ToLower(lastAttr.Val) == "http://www.ibm.com/data/dtd/v11/ibmxhtml1-transitional.dtd" { + quirks = true + } + } + + return n, quirks +} + +// quirkyIDs is a list of public doctype identifiers that cause a document +// to be interpreted in quirks mode. The identifiers should be in lower case. +var quirkyIDs = []string{ + "+//silmaril//dtd html pro v0r11 19970101//", + "-//advasoft ltd//dtd html 3.0 aswedit + extensions//", + "-//as//dtd html 3.0 aswedit + extensions//", + "-//ietf//dtd html 2.0 level 1//", + "-//ietf//dtd html 2.0 level 2//", + "-//ietf//dtd html 2.0 strict level 1//", + "-//ietf//dtd html 2.0 strict level 2//", + "-//ietf//dtd html 2.0 strict//", + "-//ietf//dtd html 2.0//", + "-//ietf//dtd html 2.1e//", + "-//ietf//dtd html 3.0//", + "-//ietf//dtd html 3.2 final//", + "-//ietf//dtd html 3.2//", + "-//ietf//dtd html 3//", + "-//ietf//dtd html level 0//", + "-//ietf//dtd html level 1//", + "-//ietf//dtd html level 2//", + "-//ietf//dtd html level 3//", + "-//ietf//dtd html strict level 0//", + "-//ietf//dtd html strict level 1//", + "-//ietf//dtd html strict level 2//", + "-//ietf//dtd html strict level 3//", + "-//ietf//dtd html strict//", + "-//ietf//dtd html//", + "-//metrius//dtd metrius presentational//", + "-//microsoft//dtd internet explorer 2.0 html strict//", + "-//microsoft//dtd internet explorer 2.0 html//", + "-//microsoft//dtd internet explorer 2.0 tables//", + "-//microsoft//dtd internet explorer 3.0 html strict//", + "-//microsoft//dtd internet explorer 3.0 html//", + "-//microsoft//dtd internet explorer 3.0 tables//", + "-//netscape comm. corp.//dtd html//", + "-//netscape comm. 
corp.//dtd strict html//", + "-//o'reilly and associates//dtd html 2.0//", + "-//o'reilly and associates//dtd html extended 1.0//", + "-//o'reilly and associates//dtd html extended relaxed 1.0//", + "-//softquad software//dtd hotmetal pro 6.0::19990601::extensions to html 4.0//", + "-//softquad//dtd hotmetal pro 4.0::19971010::extensions to html 4.0//", + "-//spyglass//dtd html 2.0 extended//", + "-//sq//dtd html 2.0 hotmetal + extensions//", + "-//sun microsystems corp.//dtd hotjava html//", + "-//sun microsystems corp.//dtd hotjava strict html//", + "-//w3c//dtd html 3 1995-03-24//", + "-//w3c//dtd html 3.2 draft//", + "-//w3c//dtd html 3.2 final//", + "-//w3c//dtd html 3.2//", + "-//w3c//dtd html 3.2s draft//", + "-//w3c//dtd html 4.0 frameset//", + "-//w3c//dtd html 4.0 transitional//", + "-//w3c//dtd html experimental 19960712//", + "-//w3c//dtd html experimental 970421//", + "-//w3c//dtd w3 html//", + "-//w3o//dtd w3 html 3.0//", + "-//webtechs//dtd mozilla html 2.0//", + "-//webtechs//dtd mozilla html//", +} diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/entity.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/entity.go new file mode 100644 index 00000000..a50c04c6 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/entity.go @@ -0,0 +1,2253 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +// All entities that do not end with ';' are 6 or fewer bytes long. +const longestEntityWithoutSemicolon = 6 + +// entity is a map from HTML entity names to their values. The semicolon matters: +// https://html.spec.whatwg.org/multipage/syntax.html#named-character-references +// lists both "amp" and "amp;" as two separate entries. 
+// +// Note that the HTML5 list is larger than the HTML4 list at +// http://www.w3.org/TR/html4/sgml/entities.html +var entity = map[string]rune{ + "AElig;": '\U000000C6', + "AMP;": '\U00000026', + "Aacute;": '\U000000C1', + "Abreve;": '\U00000102', + "Acirc;": '\U000000C2', + "Acy;": '\U00000410', + "Afr;": '\U0001D504', + "Agrave;": '\U000000C0', + "Alpha;": '\U00000391', + "Amacr;": '\U00000100', + "And;": '\U00002A53', + "Aogon;": '\U00000104', + "Aopf;": '\U0001D538', + "ApplyFunction;": '\U00002061', + "Aring;": '\U000000C5', + "Ascr;": '\U0001D49C', + "Assign;": '\U00002254', + "Atilde;": '\U000000C3', + "Auml;": '\U000000C4', + "Backslash;": '\U00002216', + "Barv;": '\U00002AE7', + "Barwed;": '\U00002306', + "Bcy;": '\U00000411', + "Because;": '\U00002235', + "Bernoullis;": '\U0000212C', + "Beta;": '\U00000392', + "Bfr;": '\U0001D505', + "Bopf;": '\U0001D539', + "Breve;": '\U000002D8', + "Bscr;": '\U0000212C', + "Bumpeq;": '\U0000224E', + "CHcy;": '\U00000427', + "COPY;": '\U000000A9', + "Cacute;": '\U00000106', + "Cap;": '\U000022D2', + "CapitalDifferentialD;": '\U00002145', + "Cayleys;": '\U0000212D', + "Ccaron;": '\U0000010C', + "Ccedil;": '\U000000C7', + "Ccirc;": '\U00000108', + "Cconint;": '\U00002230', + "Cdot;": '\U0000010A', + "Cedilla;": '\U000000B8', + "CenterDot;": '\U000000B7', + "Cfr;": '\U0000212D', + "Chi;": '\U000003A7', + "CircleDot;": '\U00002299', + "CircleMinus;": '\U00002296', + "CirclePlus;": '\U00002295', + "CircleTimes;": '\U00002297', + "ClockwiseContourIntegral;": '\U00002232', + "CloseCurlyDoubleQuote;": '\U0000201D', + "CloseCurlyQuote;": '\U00002019', + "Colon;": '\U00002237', + "Colone;": '\U00002A74', + "Congruent;": '\U00002261', + "Conint;": '\U0000222F', + "ContourIntegral;": '\U0000222E', + "Copf;": '\U00002102', + "Coproduct;": '\U00002210', + "CounterClockwiseContourIntegral;": '\U00002233', + "Cross;": '\U00002A2F', + "Cscr;": '\U0001D49E', + "Cup;": '\U000022D3', + "CupCap;": '\U0000224D', + "DD;": '\U00002145', + "DDotrahd;": '\U00002911', + "DJcy;": '\U00000402', + "DScy;": '\U00000405', + "DZcy;": '\U0000040F', + "Dagger;": '\U00002021', + "Darr;": '\U000021A1', + "Dashv;": '\U00002AE4', + "Dcaron;": '\U0000010E', + "Dcy;": '\U00000414', + "Del;": '\U00002207', + "Delta;": '\U00000394', + "Dfr;": '\U0001D507', + "DiacriticalAcute;": '\U000000B4', + "DiacriticalDot;": '\U000002D9', + "DiacriticalDoubleAcute;": '\U000002DD', + "DiacriticalGrave;": '\U00000060', + "DiacriticalTilde;": '\U000002DC', + "Diamond;": '\U000022C4', + "DifferentialD;": '\U00002146', + "Dopf;": '\U0001D53B', + "Dot;": '\U000000A8', + "DotDot;": '\U000020DC', + "DotEqual;": '\U00002250', + "DoubleContourIntegral;": '\U0000222F', + "DoubleDot;": '\U000000A8', + "DoubleDownArrow;": '\U000021D3', + "DoubleLeftArrow;": '\U000021D0', + "DoubleLeftRightArrow;": '\U000021D4', + "DoubleLeftTee;": '\U00002AE4', + "DoubleLongLeftArrow;": '\U000027F8', + "DoubleLongLeftRightArrow;": '\U000027FA', + "DoubleLongRightArrow;": '\U000027F9', + "DoubleRightArrow;": '\U000021D2', + "DoubleRightTee;": '\U000022A8', + "DoubleUpArrow;": '\U000021D1', + "DoubleUpDownArrow;": '\U000021D5', + "DoubleVerticalBar;": '\U00002225', + "DownArrow;": '\U00002193', + "DownArrowBar;": '\U00002913', + "DownArrowUpArrow;": '\U000021F5', + "DownBreve;": '\U00000311', + "DownLeftRightVector;": '\U00002950', + "DownLeftTeeVector;": '\U0000295E', + "DownLeftVector;": '\U000021BD', + "DownLeftVectorBar;": '\U00002956', + "DownRightTeeVector;": '\U0000295F', + "DownRightVector;": '\U000021C1', + 
"DownRightVectorBar;": '\U00002957', + "DownTee;": '\U000022A4', + "DownTeeArrow;": '\U000021A7', + "Downarrow;": '\U000021D3', + "Dscr;": '\U0001D49F', + "Dstrok;": '\U00000110', + "ENG;": '\U0000014A', + "ETH;": '\U000000D0', + "Eacute;": '\U000000C9', + "Ecaron;": '\U0000011A', + "Ecirc;": '\U000000CA', + "Ecy;": '\U0000042D', + "Edot;": '\U00000116', + "Efr;": '\U0001D508', + "Egrave;": '\U000000C8', + "Element;": '\U00002208', + "Emacr;": '\U00000112', + "EmptySmallSquare;": '\U000025FB', + "EmptyVerySmallSquare;": '\U000025AB', + "Eogon;": '\U00000118', + "Eopf;": '\U0001D53C', + "Epsilon;": '\U00000395', + "Equal;": '\U00002A75', + "EqualTilde;": '\U00002242', + "Equilibrium;": '\U000021CC', + "Escr;": '\U00002130', + "Esim;": '\U00002A73', + "Eta;": '\U00000397', + "Euml;": '\U000000CB', + "Exists;": '\U00002203', + "ExponentialE;": '\U00002147', + "Fcy;": '\U00000424', + "Ffr;": '\U0001D509', + "FilledSmallSquare;": '\U000025FC', + "FilledVerySmallSquare;": '\U000025AA', + "Fopf;": '\U0001D53D', + "ForAll;": '\U00002200', + "Fouriertrf;": '\U00002131', + "Fscr;": '\U00002131', + "GJcy;": '\U00000403', + "GT;": '\U0000003E', + "Gamma;": '\U00000393', + "Gammad;": '\U000003DC', + "Gbreve;": '\U0000011E', + "Gcedil;": '\U00000122', + "Gcirc;": '\U0000011C', + "Gcy;": '\U00000413', + "Gdot;": '\U00000120', + "Gfr;": '\U0001D50A', + "Gg;": '\U000022D9', + "Gopf;": '\U0001D53E', + "GreaterEqual;": '\U00002265', + "GreaterEqualLess;": '\U000022DB', + "GreaterFullEqual;": '\U00002267', + "GreaterGreater;": '\U00002AA2', + "GreaterLess;": '\U00002277', + "GreaterSlantEqual;": '\U00002A7E', + "GreaterTilde;": '\U00002273', + "Gscr;": '\U0001D4A2', + "Gt;": '\U0000226B', + "HARDcy;": '\U0000042A', + "Hacek;": '\U000002C7', + "Hat;": '\U0000005E', + "Hcirc;": '\U00000124', + "Hfr;": '\U0000210C', + "HilbertSpace;": '\U0000210B', + "Hopf;": '\U0000210D', + "HorizontalLine;": '\U00002500', + "Hscr;": '\U0000210B', + "Hstrok;": '\U00000126', + "HumpDownHump;": '\U0000224E', + "HumpEqual;": '\U0000224F', + "IEcy;": '\U00000415', + "IJlig;": '\U00000132', + "IOcy;": '\U00000401', + "Iacute;": '\U000000CD', + "Icirc;": '\U000000CE', + "Icy;": '\U00000418', + "Idot;": '\U00000130', + "Ifr;": '\U00002111', + "Igrave;": '\U000000CC', + "Im;": '\U00002111', + "Imacr;": '\U0000012A', + "ImaginaryI;": '\U00002148', + "Implies;": '\U000021D2', + "Int;": '\U0000222C', + "Integral;": '\U0000222B', + "Intersection;": '\U000022C2', + "InvisibleComma;": '\U00002063', + "InvisibleTimes;": '\U00002062', + "Iogon;": '\U0000012E', + "Iopf;": '\U0001D540', + "Iota;": '\U00000399', + "Iscr;": '\U00002110', + "Itilde;": '\U00000128', + "Iukcy;": '\U00000406', + "Iuml;": '\U000000CF', + "Jcirc;": '\U00000134', + "Jcy;": '\U00000419', + "Jfr;": '\U0001D50D', + "Jopf;": '\U0001D541', + "Jscr;": '\U0001D4A5', + "Jsercy;": '\U00000408', + "Jukcy;": '\U00000404', + "KHcy;": '\U00000425', + "KJcy;": '\U0000040C', + "Kappa;": '\U0000039A', + "Kcedil;": '\U00000136', + "Kcy;": '\U0000041A', + "Kfr;": '\U0001D50E', + "Kopf;": '\U0001D542', + "Kscr;": '\U0001D4A6', + "LJcy;": '\U00000409', + "LT;": '\U0000003C', + "Lacute;": '\U00000139', + "Lambda;": '\U0000039B', + "Lang;": '\U000027EA', + "Laplacetrf;": '\U00002112', + "Larr;": '\U0000219E', + "Lcaron;": '\U0000013D', + "Lcedil;": '\U0000013B', + "Lcy;": '\U0000041B', + "LeftAngleBracket;": '\U000027E8', + "LeftArrow;": '\U00002190', + "LeftArrowBar;": '\U000021E4', + "LeftArrowRightArrow;": '\U000021C6', + "LeftCeiling;": '\U00002308', + "LeftDoubleBracket;": '\U000027E6', 
+ "LeftDownTeeVector;": '\U00002961', + "LeftDownVector;": '\U000021C3', + "LeftDownVectorBar;": '\U00002959', + "LeftFloor;": '\U0000230A', + "LeftRightArrow;": '\U00002194', + "LeftRightVector;": '\U0000294E', + "LeftTee;": '\U000022A3', + "LeftTeeArrow;": '\U000021A4', + "LeftTeeVector;": '\U0000295A', + "LeftTriangle;": '\U000022B2', + "LeftTriangleBar;": '\U000029CF', + "LeftTriangleEqual;": '\U000022B4', + "LeftUpDownVector;": '\U00002951', + "LeftUpTeeVector;": '\U00002960', + "LeftUpVector;": '\U000021BF', + "LeftUpVectorBar;": '\U00002958', + "LeftVector;": '\U000021BC', + "LeftVectorBar;": '\U00002952', + "Leftarrow;": '\U000021D0', + "Leftrightarrow;": '\U000021D4', + "LessEqualGreater;": '\U000022DA', + "LessFullEqual;": '\U00002266', + "LessGreater;": '\U00002276', + "LessLess;": '\U00002AA1', + "LessSlantEqual;": '\U00002A7D', + "LessTilde;": '\U00002272', + "Lfr;": '\U0001D50F', + "Ll;": '\U000022D8', + "Lleftarrow;": '\U000021DA', + "Lmidot;": '\U0000013F', + "LongLeftArrow;": '\U000027F5', + "LongLeftRightArrow;": '\U000027F7', + "LongRightArrow;": '\U000027F6', + "Longleftarrow;": '\U000027F8', + "Longleftrightarrow;": '\U000027FA', + "Longrightarrow;": '\U000027F9', + "Lopf;": '\U0001D543', + "LowerLeftArrow;": '\U00002199', + "LowerRightArrow;": '\U00002198', + "Lscr;": '\U00002112', + "Lsh;": '\U000021B0', + "Lstrok;": '\U00000141', + "Lt;": '\U0000226A', + "Map;": '\U00002905', + "Mcy;": '\U0000041C', + "MediumSpace;": '\U0000205F', + "Mellintrf;": '\U00002133', + "Mfr;": '\U0001D510', + "MinusPlus;": '\U00002213', + "Mopf;": '\U0001D544', + "Mscr;": '\U00002133', + "Mu;": '\U0000039C', + "NJcy;": '\U0000040A', + "Nacute;": '\U00000143', + "Ncaron;": '\U00000147', + "Ncedil;": '\U00000145', + "Ncy;": '\U0000041D', + "NegativeMediumSpace;": '\U0000200B', + "NegativeThickSpace;": '\U0000200B', + "NegativeThinSpace;": '\U0000200B', + "NegativeVeryThinSpace;": '\U0000200B', + "NestedGreaterGreater;": '\U0000226B', + "NestedLessLess;": '\U0000226A', + "NewLine;": '\U0000000A', + "Nfr;": '\U0001D511', + "NoBreak;": '\U00002060', + "NonBreakingSpace;": '\U000000A0', + "Nopf;": '\U00002115', + "Not;": '\U00002AEC', + "NotCongruent;": '\U00002262', + "NotCupCap;": '\U0000226D', + "NotDoubleVerticalBar;": '\U00002226', + "NotElement;": '\U00002209', + "NotEqual;": '\U00002260', + "NotExists;": '\U00002204', + "NotGreater;": '\U0000226F', + "NotGreaterEqual;": '\U00002271', + "NotGreaterLess;": '\U00002279', + "NotGreaterTilde;": '\U00002275', + "NotLeftTriangle;": '\U000022EA', + "NotLeftTriangleEqual;": '\U000022EC', + "NotLess;": '\U0000226E', + "NotLessEqual;": '\U00002270', + "NotLessGreater;": '\U00002278', + "NotLessTilde;": '\U00002274', + "NotPrecedes;": '\U00002280', + "NotPrecedesSlantEqual;": '\U000022E0', + "NotReverseElement;": '\U0000220C', + "NotRightTriangle;": '\U000022EB', + "NotRightTriangleEqual;": '\U000022ED', + "NotSquareSubsetEqual;": '\U000022E2', + "NotSquareSupersetEqual;": '\U000022E3', + "NotSubsetEqual;": '\U00002288', + "NotSucceeds;": '\U00002281', + "NotSucceedsSlantEqual;": '\U000022E1', + "NotSupersetEqual;": '\U00002289', + "NotTilde;": '\U00002241', + "NotTildeEqual;": '\U00002244', + "NotTildeFullEqual;": '\U00002247', + "NotTildeTilde;": '\U00002249', + "NotVerticalBar;": '\U00002224', + "Nscr;": '\U0001D4A9', + "Ntilde;": '\U000000D1', + "Nu;": '\U0000039D', + "OElig;": '\U00000152', + "Oacute;": '\U000000D3', + "Ocirc;": '\U000000D4', + "Ocy;": '\U0000041E', + "Odblac;": '\U00000150', + "Ofr;": '\U0001D512', + "Ograve;": '\U000000D2', + 
"Omacr;": '\U0000014C', + "Omega;": '\U000003A9', + "Omicron;": '\U0000039F', + "Oopf;": '\U0001D546', + "OpenCurlyDoubleQuote;": '\U0000201C', + "OpenCurlyQuote;": '\U00002018', + "Or;": '\U00002A54', + "Oscr;": '\U0001D4AA', + "Oslash;": '\U000000D8', + "Otilde;": '\U000000D5', + "Otimes;": '\U00002A37', + "Ouml;": '\U000000D6', + "OverBar;": '\U0000203E', + "OverBrace;": '\U000023DE', + "OverBracket;": '\U000023B4', + "OverParenthesis;": '\U000023DC', + "PartialD;": '\U00002202', + "Pcy;": '\U0000041F', + "Pfr;": '\U0001D513', + "Phi;": '\U000003A6', + "Pi;": '\U000003A0', + "PlusMinus;": '\U000000B1', + "Poincareplane;": '\U0000210C', + "Popf;": '\U00002119', + "Pr;": '\U00002ABB', + "Precedes;": '\U0000227A', + "PrecedesEqual;": '\U00002AAF', + "PrecedesSlantEqual;": '\U0000227C', + "PrecedesTilde;": '\U0000227E', + "Prime;": '\U00002033', + "Product;": '\U0000220F', + "Proportion;": '\U00002237', + "Proportional;": '\U0000221D', + "Pscr;": '\U0001D4AB', + "Psi;": '\U000003A8', + "QUOT;": '\U00000022', + "Qfr;": '\U0001D514', + "Qopf;": '\U0000211A', + "Qscr;": '\U0001D4AC', + "RBarr;": '\U00002910', + "REG;": '\U000000AE', + "Racute;": '\U00000154', + "Rang;": '\U000027EB', + "Rarr;": '\U000021A0', + "Rarrtl;": '\U00002916', + "Rcaron;": '\U00000158', + "Rcedil;": '\U00000156', + "Rcy;": '\U00000420', + "Re;": '\U0000211C', + "ReverseElement;": '\U0000220B', + "ReverseEquilibrium;": '\U000021CB', + "ReverseUpEquilibrium;": '\U0000296F', + "Rfr;": '\U0000211C', + "Rho;": '\U000003A1', + "RightAngleBracket;": '\U000027E9', + "RightArrow;": '\U00002192', + "RightArrowBar;": '\U000021E5', + "RightArrowLeftArrow;": '\U000021C4', + "RightCeiling;": '\U00002309', + "RightDoubleBracket;": '\U000027E7', + "RightDownTeeVector;": '\U0000295D', + "RightDownVector;": '\U000021C2', + "RightDownVectorBar;": '\U00002955', + "RightFloor;": '\U0000230B', + "RightTee;": '\U000022A2', + "RightTeeArrow;": '\U000021A6', + "RightTeeVector;": '\U0000295B', + "RightTriangle;": '\U000022B3', + "RightTriangleBar;": '\U000029D0', + "RightTriangleEqual;": '\U000022B5', + "RightUpDownVector;": '\U0000294F', + "RightUpTeeVector;": '\U0000295C', + "RightUpVector;": '\U000021BE', + "RightUpVectorBar;": '\U00002954', + "RightVector;": '\U000021C0', + "RightVectorBar;": '\U00002953', + "Rightarrow;": '\U000021D2', + "Ropf;": '\U0000211D', + "RoundImplies;": '\U00002970', + "Rrightarrow;": '\U000021DB', + "Rscr;": '\U0000211B', + "Rsh;": '\U000021B1', + "RuleDelayed;": '\U000029F4', + "SHCHcy;": '\U00000429', + "SHcy;": '\U00000428', + "SOFTcy;": '\U0000042C', + "Sacute;": '\U0000015A', + "Sc;": '\U00002ABC', + "Scaron;": '\U00000160', + "Scedil;": '\U0000015E', + "Scirc;": '\U0000015C', + "Scy;": '\U00000421', + "Sfr;": '\U0001D516', + "ShortDownArrow;": '\U00002193', + "ShortLeftArrow;": '\U00002190', + "ShortRightArrow;": '\U00002192', + "ShortUpArrow;": '\U00002191', + "Sigma;": '\U000003A3', + "SmallCircle;": '\U00002218', + "Sopf;": '\U0001D54A', + "Sqrt;": '\U0000221A', + "Square;": '\U000025A1', + "SquareIntersection;": '\U00002293', + "SquareSubset;": '\U0000228F', + "SquareSubsetEqual;": '\U00002291', + "SquareSuperset;": '\U00002290', + "SquareSupersetEqual;": '\U00002292', + "SquareUnion;": '\U00002294', + "Sscr;": '\U0001D4AE', + "Star;": '\U000022C6', + "Sub;": '\U000022D0', + "Subset;": '\U000022D0', + "SubsetEqual;": '\U00002286', + "Succeeds;": '\U0000227B', + "SucceedsEqual;": '\U00002AB0', + "SucceedsSlantEqual;": '\U0000227D', + "SucceedsTilde;": '\U0000227F', + "SuchThat;": '\U0000220B', + "Sum;": 
'\U00002211', + "Sup;": '\U000022D1', + "Superset;": '\U00002283', + "SupersetEqual;": '\U00002287', + "Supset;": '\U000022D1', + "THORN;": '\U000000DE', + "TRADE;": '\U00002122', + "TSHcy;": '\U0000040B', + "TScy;": '\U00000426', + "Tab;": '\U00000009', + "Tau;": '\U000003A4', + "Tcaron;": '\U00000164', + "Tcedil;": '\U00000162', + "Tcy;": '\U00000422', + "Tfr;": '\U0001D517', + "Therefore;": '\U00002234', + "Theta;": '\U00000398', + "ThinSpace;": '\U00002009', + "Tilde;": '\U0000223C', + "TildeEqual;": '\U00002243', + "TildeFullEqual;": '\U00002245', + "TildeTilde;": '\U00002248', + "Topf;": '\U0001D54B', + "TripleDot;": '\U000020DB', + "Tscr;": '\U0001D4AF', + "Tstrok;": '\U00000166', + "Uacute;": '\U000000DA', + "Uarr;": '\U0000219F', + "Uarrocir;": '\U00002949', + "Ubrcy;": '\U0000040E', + "Ubreve;": '\U0000016C', + "Ucirc;": '\U000000DB', + "Ucy;": '\U00000423', + "Udblac;": '\U00000170', + "Ufr;": '\U0001D518', + "Ugrave;": '\U000000D9', + "Umacr;": '\U0000016A', + "UnderBar;": '\U0000005F', + "UnderBrace;": '\U000023DF', + "UnderBracket;": '\U000023B5', + "UnderParenthesis;": '\U000023DD', + "Union;": '\U000022C3', + "UnionPlus;": '\U0000228E', + "Uogon;": '\U00000172', + "Uopf;": '\U0001D54C', + "UpArrow;": '\U00002191', + "UpArrowBar;": '\U00002912', + "UpArrowDownArrow;": '\U000021C5', + "UpDownArrow;": '\U00002195', + "UpEquilibrium;": '\U0000296E', + "UpTee;": '\U000022A5', + "UpTeeArrow;": '\U000021A5', + "Uparrow;": '\U000021D1', + "Updownarrow;": '\U000021D5', + "UpperLeftArrow;": '\U00002196', + "UpperRightArrow;": '\U00002197', + "Upsi;": '\U000003D2', + "Upsilon;": '\U000003A5', + "Uring;": '\U0000016E', + "Uscr;": '\U0001D4B0', + "Utilde;": '\U00000168', + "Uuml;": '\U000000DC', + "VDash;": '\U000022AB', + "Vbar;": '\U00002AEB', + "Vcy;": '\U00000412', + "Vdash;": '\U000022A9', + "Vdashl;": '\U00002AE6', + "Vee;": '\U000022C1', + "Verbar;": '\U00002016', + "Vert;": '\U00002016', + "VerticalBar;": '\U00002223', + "VerticalLine;": '\U0000007C', + "VerticalSeparator;": '\U00002758', + "VerticalTilde;": '\U00002240', + "VeryThinSpace;": '\U0000200A', + "Vfr;": '\U0001D519', + "Vopf;": '\U0001D54D', + "Vscr;": '\U0001D4B1', + "Vvdash;": '\U000022AA', + "Wcirc;": '\U00000174', + "Wedge;": '\U000022C0', + "Wfr;": '\U0001D51A', + "Wopf;": '\U0001D54E', + "Wscr;": '\U0001D4B2', + "Xfr;": '\U0001D51B', + "Xi;": '\U0000039E', + "Xopf;": '\U0001D54F', + "Xscr;": '\U0001D4B3', + "YAcy;": '\U0000042F', + "YIcy;": '\U00000407', + "YUcy;": '\U0000042E', + "Yacute;": '\U000000DD', + "Ycirc;": '\U00000176', + "Ycy;": '\U0000042B', + "Yfr;": '\U0001D51C', + "Yopf;": '\U0001D550', + "Yscr;": '\U0001D4B4', + "Yuml;": '\U00000178', + "ZHcy;": '\U00000416', + "Zacute;": '\U00000179', + "Zcaron;": '\U0000017D', + "Zcy;": '\U00000417', + "Zdot;": '\U0000017B', + "ZeroWidthSpace;": '\U0000200B', + "Zeta;": '\U00000396', + "Zfr;": '\U00002128', + "Zopf;": '\U00002124', + "Zscr;": '\U0001D4B5', + "aacute;": '\U000000E1', + "abreve;": '\U00000103', + "ac;": '\U0000223E', + "acd;": '\U0000223F', + "acirc;": '\U000000E2', + "acute;": '\U000000B4', + "acy;": '\U00000430', + "aelig;": '\U000000E6', + "af;": '\U00002061', + "afr;": '\U0001D51E', + "agrave;": '\U000000E0', + "alefsym;": '\U00002135', + "aleph;": '\U00002135', + "alpha;": '\U000003B1', + "amacr;": '\U00000101', + "amalg;": '\U00002A3F', + "amp;": '\U00000026', + "and;": '\U00002227', + "andand;": '\U00002A55', + "andd;": '\U00002A5C', + "andslope;": '\U00002A58', + "andv;": '\U00002A5A', + "ang;": '\U00002220', + "ange;": '\U000029A4', + 
"angle;": '\U00002220', + "angmsd;": '\U00002221', + "angmsdaa;": '\U000029A8', + "angmsdab;": '\U000029A9', + "angmsdac;": '\U000029AA', + "angmsdad;": '\U000029AB', + "angmsdae;": '\U000029AC', + "angmsdaf;": '\U000029AD', + "angmsdag;": '\U000029AE', + "angmsdah;": '\U000029AF', + "angrt;": '\U0000221F', + "angrtvb;": '\U000022BE', + "angrtvbd;": '\U0000299D', + "angsph;": '\U00002222', + "angst;": '\U000000C5', + "angzarr;": '\U0000237C', + "aogon;": '\U00000105', + "aopf;": '\U0001D552', + "ap;": '\U00002248', + "apE;": '\U00002A70', + "apacir;": '\U00002A6F', + "ape;": '\U0000224A', + "apid;": '\U0000224B', + "apos;": '\U00000027', + "approx;": '\U00002248', + "approxeq;": '\U0000224A', + "aring;": '\U000000E5', + "ascr;": '\U0001D4B6', + "ast;": '\U0000002A', + "asymp;": '\U00002248', + "asympeq;": '\U0000224D', + "atilde;": '\U000000E3', + "auml;": '\U000000E4', + "awconint;": '\U00002233', + "awint;": '\U00002A11', + "bNot;": '\U00002AED', + "backcong;": '\U0000224C', + "backepsilon;": '\U000003F6', + "backprime;": '\U00002035', + "backsim;": '\U0000223D', + "backsimeq;": '\U000022CD', + "barvee;": '\U000022BD', + "barwed;": '\U00002305', + "barwedge;": '\U00002305', + "bbrk;": '\U000023B5', + "bbrktbrk;": '\U000023B6', + "bcong;": '\U0000224C', + "bcy;": '\U00000431', + "bdquo;": '\U0000201E', + "becaus;": '\U00002235', + "because;": '\U00002235', + "bemptyv;": '\U000029B0', + "bepsi;": '\U000003F6', + "bernou;": '\U0000212C', + "beta;": '\U000003B2', + "beth;": '\U00002136', + "between;": '\U0000226C', + "bfr;": '\U0001D51F', + "bigcap;": '\U000022C2', + "bigcirc;": '\U000025EF', + "bigcup;": '\U000022C3', + "bigodot;": '\U00002A00', + "bigoplus;": '\U00002A01', + "bigotimes;": '\U00002A02', + "bigsqcup;": '\U00002A06', + "bigstar;": '\U00002605', + "bigtriangledown;": '\U000025BD', + "bigtriangleup;": '\U000025B3', + "biguplus;": '\U00002A04', + "bigvee;": '\U000022C1', + "bigwedge;": '\U000022C0', + "bkarow;": '\U0000290D', + "blacklozenge;": '\U000029EB', + "blacksquare;": '\U000025AA', + "blacktriangle;": '\U000025B4', + "blacktriangledown;": '\U000025BE', + "blacktriangleleft;": '\U000025C2', + "blacktriangleright;": '\U000025B8', + "blank;": '\U00002423', + "blk12;": '\U00002592', + "blk14;": '\U00002591', + "blk34;": '\U00002593', + "block;": '\U00002588', + "bnot;": '\U00002310', + "bopf;": '\U0001D553', + "bot;": '\U000022A5', + "bottom;": '\U000022A5', + "bowtie;": '\U000022C8', + "boxDL;": '\U00002557', + "boxDR;": '\U00002554', + "boxDl;": '\U00002556', + "boxDr;": '\U00002553', + "boxH;": '\U00002550', + "boxHD;": '\U00002566', + "boxHU;": '\U00002569', + "boxHd;": '\U00002564', + "boxHu;": '\U00002567', + "boxUL;": '\U0000255D', + "boxUR;": '\U0000255A', + "boxUl;": '\U0000255C', + "boxUr;": '\U00002559', + "boxV;": '\U00002551', + "boxVH;": '\U0000256C', + "boxVL;": '\U00002563', + "boxVR;": '\U00002560', + "boxVh;": '\U0000256B', + "boxVl;": '\U00002562', + "boxVr;": '\U0000255F', + "boxbox;": '\U000029C9', + "boxdL;": '\U00002555', + "boxdR;": '\U00002552', + "boxdl;": '\U00002510', + "boxdr;": '\U0000250C', + "boxh;": '\U00002500', + "boxhD;": '\U00002565', + "boxhU;": '\U00002568', + "boxhd;": '\U0000252C', + "boxhu;": '\U00002534', + "boxminus;": '\U0000229F', + "boxplus;": '\U0000229E', + "boxtimes;": '\U000022A0', + "boxuL;": '\U0000255B', + "boxuR;": '\U00002558', + "boxul;": '\U00002518', + "boxur;": '\U00002514', + "boxv;": '\U00002502', + "boxvH;": '\U0000256A', + "boxvL;": '\U00002561', + "boxvR;": '\U0000255E', + "boxvh;": '\U0000253C', + "boxvl;": 
'\U00002524', + "boxvr;": '\U0000251C', + "bprime;": '\U00002035', + "breve;": '\U000002D8', + "brvbar;": '\U000000A6', + "bscr;": '\U0001D4B7', + "bsemi;": '\U0000204F', + "bsim;": '\U0000223D', + "bsime;": '\U000022CD', + "bsol;": '\U0000005C', + "bsolb;": '\U000029C5', + "bsolhsub;": '\U000027C8', + "bull;": '\U00002022', + "bullet;": '\U00002022', + "bump;": '\U0000224E', + "bumpE;": '\U00002AAE', + "bumpe;": '\U0000224F', + "bumpeq;": '\U0000224F', + "cacute;": '\U00000107', + "cap;": '\U00002229', + "capand;": '\U00002A44', + "capbrcup;": '\U00002A49', + "capcap;": '\U00002A4B', + "capcup;": '\U00002A47', + "capdot;": '\U00002A40', + "caret;": '\U00002041', + "caron;": '\U000002C7', + "ccaps;": '\U00002A4D', + "ccaron;": '\U0000010D', + "ccedil;": '\U000000E7', + "ccirc;": '\U00000109', + "ccups;": '\U00002A4C', + "ccupssm;": '\U00002A50', + "cdot;": '\U0000010B', + "cedil;": '\U000000B8', + "cemptyv;": '\U000029B2', + "cent;": '\U000000A2', + "centerdot;": '\U000000B7', + "cfr;": '\U0001D520', + "chcy;": '\U00000447', + "check;": '\U00002713', + "checkmark;": '\U00002713', + "chi;": '\U000003C7', + "cir;": '\U000025CB', + "cirE;": '\U000029C3', + "circ;": '\U000002C6', + "circeq;": '\U00002257', + "circlearrowleft;": '\U000021BA', + "circlearrowright;": '\U000021BB', + "circledR;": '\U000000AE', + "circledS;": '\U000024C8', + "circledast;": '\U0000229B', + "circledcirc;": '\U0000229A', + "circleddash;": '\U0000229D', + "cire;": '\U00002257', + "cirfnint;": '\U00002A10', + "cirmid;": '\U00002AEF', + "cirscir;": '\U000029C2', + "clubs;": '\U00002663', + "clubsuit;": '\U00002663', + "colon;": '\U0000003A', + "colone;": '\U00002254', + "coloneq;": '\U00002254', + "comma;": '\U0000002C', + "commat;": '\U00000040', + "comp;": '\U00002201', + "compfn;": '\U00002218', + "complement;": '\U00002201', + "complexes;": '\U00002102', + "cong;": '\U00002245', + "congdot;": '\U00002A6D', + "conint;": '\U0000222E', + "copf;": '\U0001D554', + "coprod;": '\U00002210', + "copy;": '\U000000A9', + "copysr;": '\U00002117', + "crarr;": '\U000021B5', + "cross;": '\U00002717', + "cscr;": '\U0001D4B8', + "csub;": '\U00002ACF', + "csube;": '\U00002AD1', + "csup;": '\U00002AD0', + "csupe;": '\U00002AD2', + "ctdot;": '\U000022EF', + "cudarrl;": '\U00002938', + "cudarrr;": '\U00002935', + "cuepr;": '\U000022DE', + "cuesc;": '\U000022DF', + "cularr;": '\U000021B6', + "cularrp;": '\U0000293D', + "cup;": '\U0000222A', + "cupbrcap;": '\U00002A48', + "cupcap;": '\U00002A46', + "cupcup;": '\U00002A4A', + "cupdot;": '\U0000228D', + "cupor;": '\U00002A45', + "curarr;": '\U000021B7', + "curarrm;": '\U0000293C', + "curlyeqprec;": '\U000022DE', + "curlyeqsucc;": '\U000022DF', + "curlyvee;": '\U000022CE', + "curlywedge;": '\U000022CF', + "curren;": '\U000000A4', + "curvearrowleft;": '\U000021B6', + "curvearrowright;": '\U000021B7', + "cuvee;": '\U000022CE', + "cuwed;": '\U000022CF', + "cwconint;": '\U00002232', + "cwint;": '\U00002231', + "cylcty;": '\U0000232D', + "dArr;": '\U000021D3', + "dHar;": '\U00002965', + "dagger;": '\U00002020', + "daleth;": '\U00002138', + "darr;": '\U00002193', + "dash;": '\U00002010', + "dashv;": '\U000022A3', + "dbkarow;": '\U0000290F', + "dblac;": '\U000002DD', + "dcaron;": '\U0000010F', + "dcy;": '\U00000434', + "dd;": '\U00002146', + "ddagger;": '\U00002021', + "ddarr;": '\U000021CA', + "ddotseq;": '\U00002A77', + "deg;": '\U000000B0', + "delta;": '\U000003B4', + "demptyv;": '\U000029B1', + "dfisht;": '\U0000297F', + "dfr;": '\U0001D521', + "dharl;": '\U000021C3', + "dharr;": '\U000021C2', + 
"diam;": '\U000022C4', + "diamond;": '\U000022C4', + "diamondsuit;": '\U00002666', + "diams;": '\U00002666', + "die;": '\U000000A8', + "digamma;": '\U000003DD', + "disin;": '\U000022F2', + "div;": '\U000000F7', + "divide;": '\U000000F7', + "divideontimes;": '\U000022C7', + "divonx;": '\U000022C7', + "djcy;": '\U00000452', + "dlcorn;": '\U0000231E', + "dlcrop;": '\U0000230D', + "dollar;": '\U00000024', + "dopf;": '\U0001D555', + "dot;": '\U000002D9', + "doteq;": '\U00002250', + "doteqdot;": '\U00002251', + "dotminus;": '\U00002238', + "dotplus;": '\U00002214', + "dotsquare;": '\U000022A1', + "doublebarwedge;": '\U00002306', + "downarrow;": '\U00002193', + "downdownarrows;": '\U000021CA', + "downharpoonleft;": '\U000021C3', + "downharpoonright;": '\U000021C2', + "drbkarow;": '\U00002910', + "drcorn;": '\U0000231F', + "drcrop;": '\U0000230C', + "dscr;": '\U0001D4B9', + "dscy;": '\U00000455', + "dsol;": '\U000029F6', + "dstrok;": '\U00000111', + "dtdot;": '\U000022F1', + "dtri;": '\U000025BF', + "dtrif;": '\U000025BE', + "duarr;": '\U000021F5', + "duhar;": '\U0000296F', + "dwangle;": '\U000029A6', + "dzcy;": '\U0000045F', + "dzigrarr;": '\U000027FF', + "eDDot;": '\U00002A77', + "eDot;": '\U00002251', + "eacute;": '\U000000E9', + "easter;": '\U00002A6E', + "ecaron;": '\U0000011B', + "ecir;": '\U00002256', + "ecirc;": '\U000000EA', + "ecolon;": '\U00002255', + "ecy;": '\U0000044D', + "edot;": '\U00000117', + "ee;": '\U00002147', + "efDot;": '\U00002252', + "efr;": '\U0001D522', + "eg;": '\U00002A9A', + "egrave;": '\U000000E8', + "egs;": '\U00002A96', + "egsdot;": '\U00002A98', + "el;": '\U00002A99', + "elinters;": '\U000023E7', + "ell;": '\U00002113', + "els;": '\U00002A95', + "elsdot;": '\U00002A97', + "emacr;": '\U00000113', + "empty;": '\U00002205', + "emptyset;": '\U00002205', + "emptyv;": '\U00002205', + "emsp;": '\U00002003', + "emsp13;": '\U00002004', + "emsp14;": '\U00002005', + "eng;": '\U0000014B', + "ensp;": '\U00002002', + "eogon;": '\U00000119', + "eopf;": '\U0001D556', + "epar;": '\U000022D5', + "eparsl;": '\U000029E3', + "eplus;": '\U00002A71', + "epsi;": '\U000003B5', + "epsilon;": '\U000003B5', + "epsiv;": '\U000003F5', + "eqcirc;": '\U00002256', + "eqcolon;": '\U00002255', + "eqsim;": '\U00002242', + "eqslantgtr;": '\U00002A96', + "eqslantless;": '\U00002A95', + "equals;": '\U0000003D', + "equest;": '\U0000225F', + "equiv;": '\U00002261', + "equivDD;": '\U00002A78', + "eqvparsl;": '\U000029E5', + "erDot;": '\U00002253', + "erarr;": '\U00002971', + "escr;": '\U0000212F', + "esdot;": '\U00002250', + "esim;": '\U00002242', + "eta;": '\U000003B7', + "eth;": '\U000000F0', + "euml;": '\U000000EB', + "euro;": '\U000020AC', + "excl;": '\U00000021', + "exist;": '\U00002203', + "expectation;": '\U00002130', + "exponentiale;": '\U00002147', + "fallingdotseq;": '\U00002252', + "fcy;": '\U00000444', + "female;": '\U00002640', + "ffilig;": '\U0000FB03', + "fflig;": '\U0000FB00', + "ffllig;": '\U0000FB04', + "ffr;": '\U0001D523', + "filig;": '\U0000FB01', + "flat;": '\U0000266D', + "fllig;": '\U0000FB02', + "fltns;": '\U000025B1', + "fnof;": '\U00000192', + "fopf;": '\U0001D557', + "forall;": '\U00002200', + "fork;": '\U000022D4', + "forkv;": '\U00002AD9', + "fpartint;": '\U00002A0D', + "frac12;": '\U000000BD', + "frac13;": '\U00002153', + "frac14;": '\U000000BC', + "frac15;": '\U00002155', + "frac16;": '\U00002159', + "frac18;": '\U0000215B', + "frac23;": '\U00002154', + "frac25;": '\U00002156', + "frac34;": '\U000000BE', + "frac35;": '\U00002157', + "frac38;": '\U0000215C', + "frac45;": 
'\U00002158', + "frac56;": '\U0000215A', + "frac58;": '\U0000215D', + "frac78;": '\U0000215E', + "frasl;": '\U00002044', + "frown;": '\U00002322', + "fscr;": '\U0001D4BB', + "gE;": '\U00002267', + "gEl;": '\U00002A8C', + "gacute;": '\U000001F5', + "gamma;": '\U000003B3', + "gammad;": '\U000003DD', + "gap;": '\U00002A86', + "gbreve;": '\U0000011F', + "gcirc;": '\U0000011D', + "gcy;": '\U00000433', + "gdot;": '\U00000121', + "ge;": '\U00002265', + "gel;": '\U000022DB', + "geq;": '\U00002265', + "geqq;": '\U00002267', + "geqslant;": '\U00002A7E', + "ges;": '\U00002A7E', + "gescc;": '\U00002AA9', + "gesdot;": '\U00002A80', + "gesdoto;": '\U00002A82', + "gesdotol;": '\U00002A84', + "gesles;": '\U00002A94', + "gfr;": '\U0001D524', + "gg;": '\U0000226B', + "ggg;": '\U000022D9', + "gimel;": '\U00002137', + "gjcy;": '\U00000453', + "gl;": '\U00002277', + "glE;": '\U00002A92', + "gla;": '\U00002AA5', + "glj;": '\U00002AA4', + "gnE;": '\U00002269', + "gnap;": '\U00002A8A', + "gnapprox;": '\U00002A8A', + "gne;": '\U00002A88', + "gneq;": '\U00002A88', + "gneqq;": '\U00002269', + "gnsim;": '\U000022E7', + "gopf;": '\U0001D558', + "grave;": '\U00000060', + "gscr;": '\U0000210A', + "gsim;": '\U00002273', + "gsime;": '\U00002A8E', + "gsiml;": '\U00002A90', + "gt;": '\U0000003E', + "gtcc;": '\U00002AA7', + "gtcir;": '\U00002A7A', + "gtdot;": '\U000022D7', + "gtlPar;": '\U00002995', + "gtquest;": '\U00002A7C', + "gtrapprox;": '\U00002A86', + "gtrarr;": '\U00002978', + "gtrdot;": '\U000022D7', + "gtreqless;": '\U000022DB', + "gtreqqless;": '\U00002A8C', + "gtrless;": '\U00002277', + "gtrsim;": '\U00002273', + "hArr;": '\U000021D4', + "hairsp;": '\U0000200A', + "half;": '\U000000BD', + "hamilt;": '\U0000210B', + "hardcy;": '\U0000044A', + "harr;": '\U00002194', + "harrcir;": '\U00002948', + "harrw;": '\U000021AD', + "hbar;": '\U0000210F', + "hcirc;": '\U00000125', + "hearts;": '\U00002665', + "heartsuit;": '\U00002665', + "hellip;": '\U00002026', + "hercon;": '\U000022B9', + "hfr;": '\U0001D525', + "hksearow;": '\U00002925', + "hkswarow;": '\U00002926', + "hoarr;": '\U000021FF', + "homtht;": '\U0000223B', + "hookleftarrow;": '\U000021A9', + "hookrightarrow;": '\U000021AA', + "hopf;": '\U0001D559', + "horbar;": '\U00002015', + "hscr;": '\U0001D4BD', + "hslash;": '\U0000210F', + "hstrok;": '\U00000127', + "hybull;": '\U00002043', + "hyphen;": '\U00002010', + "iacute;": '\U000000ED', + "ic;": '\U00002063', + "icirc;": '\U000000EE', + "icy;": '\U00000438', + "iecy;": '\U00000435', + "iexcl;": '\U000000A1', + "iff;": '\U000021D4', + "ifr;": '\U0001D526', + "igrave;": '\U000000EC', + "ii;": '\U00002148', + "iiiint;": '\U00002A0C', + "iiint;": '\U0000222D', + "iinfin;": '\U000029DC', + "iiota;": '\U00002129', + "ijlig;": '\U00000133', + "imacr;": '\U0000012B', + "image;": '\U00002111', + "imagline;": '\U00002110', + "imagpart;": '\U00002111', + "imath;": '\U00000131', + "imof;": '\U000022B7', + "imped;": '\U000001B5', + "in;": '\U00002208', + "incare;": '\U00002105', + "infin;": '\U0000221E', + "infintie;": '\U000029DD', + "inodot;": '\U00000131', + "int;": '\U0000222B', + "intcal;": '\U000022BA', + "integers;": '\U00002124', + "intercal;": '\U000022BA', + "intlarhk;": '\U00002A17', + "intprod;": '\U00002A3C', + "iocy;": '\U00000451', + "iogon;": '\U0000012F', + "iopf;": '\U0001D55A', + "iota;": '\U000003B9', + "iprod;": '\U00002A3C', + "iquest;": '\U000000BF', + "iscr;": '\U0001D4BE', + "isin;": '\U00002208', + "isinE;": '\U000022F9', + "isindot;": '\U000022F5', + "isins;": '\U000022F4', + "isinsv;": '\U000022F3', + 
"isinv;": '\U00002208', + "it;": '\U00002062', + "itilde;": '\U00000129', + "iukcy;": '\U00000456', + "iuml;": '\U000000EF', + "jcirc;": '\U00000135', + "jcy;": '\U00000439', + "jfr;": '\U0001D527', + "jmath;": '\U00000237', + "jopf;": '\U0001D55B', + "jscr;": '\U0001D4BF', + "jsercy;": '\U00000458', + "jukcy;": '\U00000454', + "kappa;": '\U000003BA', + "kappav;": '\U000003F0', + "kcedil;": '\U00000137', + "kcy;": '\U0000043A', + "kfr;": '\U0001D528', + "kgreen;": '\U00000138', + "khcy;": '\U00000445', + "kjcy;": '\U0000045C', + "kopf;": '\U0001D55C', + "kscr;": '\U0001D4C0', + "lAarr;": '\U000021DA', + "lArr;": '\U000021D0', + "lAtail;": '\U0000291B', + "lBarr;": '\U0000290E', + "lE;": '\U00002266', + "lEg;": '\U00002A8B', + "lHar;": '\U00002962', + "lacute;": '\U0000013A', + "laemptyv;": '\U000029B4', + "lagran;": '\U00002112', + "lambda;": '\U000003BB', + "lang;": '\U000027E8', + "langd;": '\U00002991', + "langle;": '\U000027E8', + "lap;": '\U00002A85', + "laquo;": '\U000000AB', + "larr;": '\U00002190', + "larrb;": '\U000021E4', + "larrbfs;": '\U0000291F', + "larrfs;": '\U0000291D', + "larrhk;": '\U000021A9', + "larrlp;": '\U000021AB', + "larrpl;": '\U00002939', + "larrsim;": '\U00002973', + "larrtl;": '\U000021A2', + "lat;": '\U00002AAB', + "latail;": '\U00002919', + "late;": '\U00002AAD', + "lbarr;": '\U0000290C', + "lbbrk;": '\U00002772', + "lbrace;": '\U0000007B', + "lbrack;": '\U0000005B', + "lbrke;": '\U0000298B', + "lbrksld;": '\U0000298F', + "lbrkslu;": '\U0000298D', + "lcaron;": '\U0000013E', + "lcedil;": '\U0000013C', + "lceil;": '\U00002308', + "lcub;": '\U0000007B', + "lcy;": '\U0000043B', + "ldca;": '\U00002936', + "ldquo;": '\U0000201C', + "ldquor;": '\U0000201E', + "ldrdhar;": '\U00002967', + "ldrushar;": '\U0000294B', + "ldsh;": '\U000021B2', + "le;": '\U00002264', + "leftarrow;": '\U00002190', + "leftarrowtail;": '\U000021A2', + "leftharpoondown;": '\U000021BD', + "leftharpoonup;": '\U000021BC', + "leftleftarrows;": '\U000021C7', + "leftrightarrow;": '\U00002194', + "leftrightarrows;": '\U000021C6', + "leftrightharpoons;": '\U000021CB', + "leftrightsquigarrow;": '\U000021AD', + "leftthreetimes;": '\U000022CB', + "leg;": '\U000022DA', + "leq;": '\U00002264', + "leqq;": '\U00002266', + "leqslant;": '\U00002A7D', + "les;": '\U00002A7D', + "lescc;": '\U00002AA8', + "lesdot;": '\U00002A7F', + "lesdoto;": '\U00002A81', + "lesdotor;": '\U00002A83', + "lesges;": '\U00002A93', + "lessapprox;": '\U00002A85', + "lessdot;": '\U000022D6', + "lesseqgtr;": '\U000022DA', + "lesseqqgtr;": '\U00002A8B', + "lessgtr;": '\U00002276', + "lesssim;": '\U00002272', + "lfisht;": '\U0000297C', + "lfloor;": '\U0000230A', + "lfr;": '\U0001D529', + "lg;": '\U00002276', + "lgE;": '\U00002A91', + "lhard;": '\U000021BD', + "lharu;": '\U000021BC', + "lharul;": '\U0000296A', + "lhblk;": '\U00002584', + "ljcy;": '\U00000459', + "ll;": '\U0000226A', + "llarr;": '\U000021C7', + "llcorner;": '\U0000231E', + "llhard;": '\U0000296B', + "lltri;": '\U000025FA', + "lmidot;": '\U00000140', + "lmoust;": '\U000023B0', + "lmoustache;": '\U000023B0', + "lnE;": '\U00002268', + "lnap;": '\U00002A89', + "lnapprox;": '\U00002A89', + "lne;": '\U00002A87', + "lneq;": '\U00002A87', + "lneqq;": '\U00002268', + "lnsim;": '\U000022E6', + "loang;": '\U000027EC', + "loarr;": '\U000021FD', + "lobrk;": '\U000027E6', + "longleftarrow;": '\U000027F5', + "longleftrightarrow;": '\U000027F7', + "longmapsto;": '\U000027FC', + "longrightarrow;": '\U000027F6', + "looparrowleft;": '\U000021AB', + "looparrowright;": '\U000021AC', + "lopar;": 
'\U00002985', + "lopf;": '\U0001D55D', + "loplus;": '\U00002A2D', + "lotimes;": '\U00002A34', + "lowast;": '\U00002217', + "lowbar;": '\U0000005F', + "loz;": '\U000025CA', + "lozenge;": '\U000025CA', + "lozf;": '\U000029EB', + "lpar;": '\U00000028', + "lparlt;": '\U00002993', + "lrarr;": '\U000021C6', + "lrcorner;": '\U0000231F', + "lrhar;": '\U000021CB', + "lrhard;": '\U0000296D', + "lrm;": '\U0000200E', + "lrtri;": '\U000022BF', + "lsaquo;": '\U00002039', + "lscr;": '\U0001D4C1', + "lsh;": '\U000021B0', + "lsim;": '\U00002272', + "lsime;": '\U00002A8D', + "lsimg;": '\U00002A8F', + "lsqb;": '\U0000005B', + "lsquo;": '\U00002018', + "lsquor;": '\U0000201A', + "lstrok;": '\U00000142', + "lt;": '\U0000003C', + "ltcc;": '\U00002AA6', + "ltcir;": '\U00002A79', + "ltdot;": '\U000022D6', + "lthree;": '\U000022CB', + "ltimes;": '\U000022C9', + "ltlarr;": '\U00002976', + "ltquest;": '\U00002A7B', + "ltrPar;": '\U00002996', + "ltri;": '\U000025C3', + "ltrie;": '\U000022B4', + "ltrif;": '\U000025C2', + "lurdshar;": '\U0000294A', + "luruhar;": '\U00002966', + "mDDot;": '\U0000223A', + "macr;": '\U000000AF', + "male;": '\U00002642', + "malt;": '\U00002720', + "maltese;": '\U00002720', + "map;": '\U000021A6', + "mapsto;": '\U000021A6', + "mapstodown;": '\U000021A7', + "mapstoleft;": '\U000021A4', + "mapstoup;": '\U000021A5', + "marker;": '\U000025AE', + "mcomma;": '\U00002A29', + "mcy;": '\U0000043C', + "mdash;": '\U00002014', + "measuredangle;": '\U00002221', + "mfr;": '\U0001D52A', + "mho;": '\U00002127', + "micro;": '\U000000B5', + "mid;": '\U00002223', + "midast;": '\U0000002A', + "midcir;": '\U00002AF0', + "middot;": '\U000000B7', + "minus;": '\U00002212', + "minusb;": '\U0000229F', + "minusd;": '\U00002238', + "minusdu;": '\U00002A2A', + "mlcp;": '\U00002ADB', + "mldr;": '\U00002026', + "mnplus;": '\U00002213', + "models;": '\U000022A7', + "mopf;": '\U0001D55E', + "mp;": '\U00002213', + "mscr;": '\U0001D4C2', + "mstpos;": '\U0000223E', + "mu;": '\U000003BC', + "multimap;": '\U000022B8', + "mumap;": '\U000022B8', + "nLeftarrow;": '\U000021CD', + "nLeftrightarrow;": '\U000021CE', + "nRightarrow;": '\U000021CF', + "nVDash;": '\U000022AF', + "nVdash;": '\U000022AE', + "nabla;": '\U00002207', + "nacute;": '\U00000144', + "nap;": '\U00002249', + "napos;": '\U00000149', + "napprox;": '\U00002249', + "natur;": '\U0000266E', + "natural;": '\U0000266E', + "naturals;": '\U00002115', + "nbsp;": '\U000000A0', + "ncap;": '\U00002A43', + "ncaron;": '\U00000148', + "ncedil;": '\U00000146', + "ncong;": '\U00002247', + "ncup;": '\U00002A42', + "ncy;": '\U0000043D', + "ndash;": '\U00002013', + "ne;": '\U00002260', + "neArr;": '\U000021D7', + "nearhk;": '\U00002924', + "nearr;": '\U00002197', + "nearrow;": '\U00002197', + "nequiv;": '\U00002262', + "nesear;": '\U00002928', + "nexist;": '\U00002204', + "nexists;": '\U00002204', + "nfr;": '\U0001D52B', + "nge;": '\U00002271', + "ngeq;": '\U00002271', + "ngsim;": '\U00002275', + "ngt;": '\U0000226F', + "ngtr;": '\U0000226F', + "nhArr;": '\U000021CE', + "nharr;": '\U000021AE', + "nhpar;": '\U00002AF2', + "ni;": '\U0000220B', + "nis;": '\U000022FC', + "nisd;": '\U000022FA', + "niv;": '\U0000220B', + "njcy;": '\U0000045A', + "nlArr;": '\U000021CD', + "nlarr;": '\U0000219A', + "nldr;": '\U00002025', + "nle;": '\U00002270', + "nleftarrow;": '\U0000219A', + "nleftrightarrow;": '\U000021AE', + "nleq;": '\U00002270', + "nless;": '\U0000226E', + "nlsim;": '\U00002274', + "nlt;": '\U0000226E', + "nltri;": '\U000022EA', + "nltrie;": '\U000022EC', + "nmid;": '\U00002224', + 
"nopf;": '\U0001D55F', + "not;": '\U000000AC', + "notin;": '\U00002209', + "notinva;": '\U00002209', + "notinvb;": '\U000022F7', + "notinvc;": '\U000022F6', + "notni;": '\U0000220C', + "notniva;": '\U0000220C', + "notnivb;": '\U000022FE', + "notnivc;": '\U000022FD', + "npar;": '\U00002226', + "nparallel;": '\U00002226', + "npolint;": '\U00002A14', + "npr;": '\U00002280', + "nprcue;": '\U000022E0', + "nprec;": '\U00002280', + "nrArr;": '\U000021CF', + "nrarr;": '\U0000219B', + "nrightarrow;": '\U0000219B', + "nrtri;": '\U000022EB', + "nrtrie;": '\U000022ED', + "nsc;": '\U00002281', + "nsccue;": '\U000022E1', + "nscr;": '\U0001D4C3', + "nshortmid;": '\U00002224', + "nshortparallel;": '\U00002226', + "nsim;": '\U00002241', + "nsime;": '\U00002244', + "nsimeq;": '\U00002244', + "nsmid;": '\U00002224', + "nspar;": '\U00002226', + "nsqsube;": '\U000022E2', + "nsqsupe;": '\U000022E3', + "nsub;": '\U00002284', + "nsube;": '\U00002288', + "nsubseteq;": '\U00002288', + "nsucc;": '\U00002281', + "nsup;": '\U00002285', + "nsupe;": '\U00002289', + "nsupseteq;": '\U00002289', + "ntgl;": '\U00002279', + "ntilde;": '\U000000F1', + "ntlg;": '\U00002278', + "ntriangleleft;": '\U000022EA', + "ntrianglelefteq;": '\U000022EC', + "ntriangleright;": '\U000022EB', + "ntrianglerighteq;": '\U000022ED', + "nu;": '\U000003BD', + "num;": '\U00000023', + "numero;": '\U00002116', + "numsp;": '\U00002007', + "nvDash;": '\U000022AD', + "nvHarr;": '\U00002904', + "nvdash;": '\U000022AC', + "nvinfin;": '\U000029DE', + "nvlArr;": '\U00002902', + "nvrArr;": '\U00002903', + "nwArr;": '\U000021D6', + "nwarhk;": '\U00002923', + "nwarr;": '\U00002196', + "nwarrow;": '\U00002196', + "nwnear;": '\U00002927', + "oS;": '\U000024C8', + "oacute;": '\U000000F3', + "oast;": '\U0000229B', + "ocir;": '\U0000229A', + "ocirc;": '\U000000F4', + "ocy;": '\U0000043E', + "odash;": '\U0000229D', + "odblac;": '\U00000151', + "odiv;": '\U00002A38', + "odot;": '\U00002299', + "odsold;": '\U000029BC', + "oelig;": '\U00000153', + "ofcir;": '\U000029BF', + "ofr;": '\U0001D52C', + "ogon;": '\U000002DB', + "ograve;": '\U000000F2', + "ogt;": '\U000029C1', + "ohbar;": '\U000029B5', + "ohm;": '\U000003A9', + "oint;": '\U0000222E', + "olarr;": '\U000021BA', + "olcir;": '\U000029BE', + "olcross;": '\U000029BB', + "oline;": '\U0000203E', + "olt;": '\U000029C0', + "omacr;": '\U0000014D', + "omega;": '\U000003C9', + "omicron;": '\U000003BF', + "omid;": '\U000029B6', + "ominus;": '\U00002296', + "oopf;": '\U0001D560', + "opar;": '\U000029B7', + "operp;": '\U000029B9', + "oplus;": '\U00002295', + "or;": '\U00002228', + "orarr;": '\U000021BB', + "ord;": '\U00002A5D', + "order;": '\U00002134', + "orderof;": '\U00002134', + "ordf;": '\U000000AA', + "ordm;": '\U000000BA', + "origof;": '\U000022B6', + "oror;": '\U00002A56', + "orslope;": '\U00002A57', + "orv;": '\U00002A5B', + "oscr;": '\U00002134', + "oslash;": '\U000000F8', + "osol;": '\U00002298', + "otilde;": '\U000000F5', + "otimes;": '\U00002297', + "otimesas;": '\U00002A36', + "ouml;": '\U000000F6', + "ovbar;": '\U0000233D', + "par;": '\U00002225', + "para;": '\U000000B6', + "parallel;": '\U00002225', + "parsim;": '\U00002AF3', + "parsl;": '\U00002AFD', + "part;": '\U00002202', + "pcy;": '\U0000043F', + "percnt;": '\U00000025', + "period;": '\U0000002E', + "permil;": '\U00002030', + "perp;": '\U000022A5', + "pertenk;": '\U00002031', + "pfr;": '\U0001D52D', + "phi;": '\U000003C6', + "phiv;": '\U000003D5', + "phmmat;": '\U00002133', + "phone;": '\U0000260E', + "pi;": '\U000003C0', + "pitchfork;": '\U000022D4', + 
"piv;": '\U000003D6', + "planck;": '\U0000210F', + "planckh;": '\U0000210E', + "plankv;": '\U0000210F', + "plus;": '\U0000002B', + "plusacir;": '\U00002A23', + "plusb;": '\U0000229E', + "pluscir;": '\U00002A22', + "plusdo;": '\U00002214', + "plusdu;": '\U00002A25', + "pluse;": '\U00002A72', + "plusmn;": '\U000000B1', + "plussim;": '\U00002A26', + "plustwo;": '\U00002A27', + "pm;": '\U000000B1', + "pointint;": '\U00002A15', + "popf;": '\U0001D561', + "pound;": '\U000000A3', + "pr;": '\U0000227A', + "prE;": '\U00002AB3', + "prap;": '\U00002AB7', + "prcue;": '\U0000227C', + "pre;": '\U00002AAF', + "prec;": '\U0000227A', + "precapprox;": '\U00002AB7', + "preccurlyeq;": '\U0000227C', + "preceq;": '\U00002AAF', + "precnapprox;": '\U00002AB9', + "precneqq;": '\U00002AB5', + "precnsim;": '\U000022E8', + "precsim;": '\U0000227E', + "prime;": '\U00002032', + "primes;": '\U00002119', + "prnE;": '\U00002AB5', + "prnap;": '\U00002AB9', + "prnsim;": '\U000022E8', + "prod;": '\U0000220F', + "profalar;": '\U0000232E', + "profline;": '\U00002312', + "profsurf;": '\U00002313', + "prop;": '\U0000221D', + "propto;": '\U0000221D', + "prsim;": '\U0000227E', + "prurel;": '\U000022B0', + "pscr;": '\U0001D4C5', + "psi;": '\U000003C8', + "puncsp;": '\U00002008', + "qfr;": '\U0001D52E', + "qint;": '\U00002A0C', + "qopf;": '\U0001D562', + "qprime;": '\U00002057', + "qscr;": '\U0001D4C6', + "quaternions;": '\U0000210D', + "quatint;": '\U00002A16', + "quest;": '\U0000003F', + "questeq;": '\U0000225F', + "quot;": '\U00000022', + "rAarr;": '\U000021DB', + "rArr;": '\U000021D2', + "rAtail;": '\U0000291C', + "rBarr;": '\U0000290F', + "rHar;": '\U00002964', + "racute;": '\U00000155', + "radic;": '\U0000221A', + "raemptyv;": '\U000029B3', + "rang;": '\U000027E9', + "rangd;": '\U00002992', + "range;": '\U000029A5', + "rangle;": '\U000027E9', + "raquo;": '\U000000BB', + "rarr;": '\U00002192', + "rarrap;": '\U00002975', + "rarrb;": '\U000021E5', + "rarrbfs;": '\U00002920', + "rarrc;": '\U00002933', + "rarrfs;": '\U0000291E', + "rarrhk;": '\U000021AA', + "rarrlp;": '\U000021AC', + "rarrpl;": '\U00002945', + "rarrsim;": '\U00002974', + "rarrtl;": '\U000021A3', + "rarrw;": '\U0000219D', + "ratail;": '\U0000291A', + "ratio;": '\U00002236', + "rationals;": '\U0000211A', + "rbarr;": '\U0000290D', + "rbbrk;": '\U00002773', + "rbrace;": '\U0000007D', + "rbrack;": '\U0000005D', + "rbrke;": '\U0000298C', + "rbrksld;": '\U0000298E', + "rbrkslu;": '\U00002990', + "rcaron;": '\U00000159', + "rcedil;": '\U00000157', + "rceil;": '\U00002309', + "rcub;": '\U0000007D', + "rcy;": '\U00000440', + "rdca;": '\U00002937', + "rdldhar;": '\U00002969', + "rdquo;": '\U0000201D', + "rdquor;": '\U0000201D', + "rdsh;": '\U000021B3', + "real;": '\U0000211C', + "realine;": '\U0000211B', + "realpart;": '\U0000211C', + "reals;": '\U0000211D', + "rect;": '\U000025AD', + "reg;": '\U000000AE', + "rfisht;": '\U0000297D', + "rfloor;": '\U0000230B', + "rfr;": '\U0001D52F', + "rhard;": '\U000021C1', + "rharu;": '\U000021C0', + "rharul;": '\U0000296C', + "rho;": '\U000003C1', + "rhov;": '\U000003F1', + "rightarrow;": '\U00002192', + "rightarrowtail;": '\U000021A3', + "rightharpoondown;": '\U000021C1', + "rightharpoonup;": '\U000021C0', + "rightleftarrows;": '\U000021C4', + "rightleftharpoons;": '\U000021CC', + "rightrightarrows;": '\U000021C9', + "rightsquigarrow;": '\U0000219D', + "rightthreetimes;": '\U000022CC', + "ring;": '\U000002DA', + "risingdotseq;": '\U00002253', + "rlarr;": '\U000021C4', + "rlhar;": '\U000021CC', + "rlm;": '\U0000200F', + "rmoust;": 
'\U000023B1', + "rmoustache;": '\U000023B1', + "rnmid;": '\U00002AEE', + "roang;": '\U000027ED', + "roarr;": '\U000021FE', + "robrk;": '\U000027E7', + "ropar;": '\U00002986', + "ropf;": '\U0001D563', + "roplus;": '\U00002A2E', + "rotimes;": '\U00002A35', + "rpar;": '\U00000029', + "rpargt;": '\U00002994', + "rppolint;": '\U00002A12', + "rrarr;": '\U000021C9', + "rsaquo;": '\U0000203A', + "rscr;": '\U0001D4C7', + "rsh;": '\U000021B1', + "rsqb;": '\U0000005D', + "rsquo;": '\U00002019', + "rsquor;": '\U00002019', + "rthree;": '\U000022CC', + "rtimes;": '\U000022CA', + "rtri;": '\U000025B9', + "rtrie;": '\U000022B5', + "rtrif;": '\U000025B8', + "rtriltri;": '\U000029CE', + "ruluhar;": '\U00002968', + "rx;": '\U0000211E', + "sacute;": '\U0000015B', + "sbquo;": '\U0000201A', + "sc;": '\U0000227B', + "scE;": '\U00002AB4', + "scap;": '\U00002AB8', + "scaron;": '\U00000161', + "sccue;": '\U0000227D', + "sce;": '\U00002AB0', + "scedil;": '\U0000015F', + "scirc;": '\U0000015D', + "scnE;": '\U00002AB6', + "scnap;": '\U00002ABA', + "scnsim;": '\U000022E9', + "scpolint;": '\U00002A13', + "scsim;": '\U0000227F', + "scy;": '\U00000441', + "sdot;": '\U000022C5', + "sdotb;": '\U000022A1', + "sdote;": '\U00002A66', + "seArr;": '\U000021D8', + "searhk;": '\U00002925', + "searr;": '\U00002198', + "searrow;": '\U00002198', + "sect;": '\U000000A7', + "semi;": '\U0000003B', + "seswar;": '\U00002929', + "setminus;": '\U00002216', + "setmn;": '\U00002216', + "sext;": '\U00002736', + "sfr;": '\U0001D530', + "sfrown;": '\U00002322', + "sharp;": '\U0000266F', + "shchcy;": '\U00000449', + "shcy;": '\U00000448', + "shortmid;": '\U00002223', + "shortparallel;": '\U00002225', + "shy;": '\U000000AD', + "sigma;": '\U000003C3', + "sigmaf;": '\U000003C2', + "sigmav;": '\U000003C2', + "sim;": '\U0000223C', + "simdot;": '\U00002A6A', + "sime;": '\U00002243', + "simeq;": '\U00002243', + "simg;": '\U00002A9E', + "simgE;": '\U00002AA0', + "siml;": '\U00002A9D', + "simlE;": '\U00002A9F', + "simne;": '\U00002246', + "simplus;": '\U00002A24', + "simrarr;": '\U00002972', + "slarr;": '\U00002190', + "smallsetminus;": '\U00002216', + "smashp;": '\U00002A33', + "smeparsl;": '\U000029E4', + "smid;": '\U00002223', + "smile;": '\U00002323', + "smt;": '\U00002AAA', + "smte;": '\U00002AAC', + "softcy;": '\U0000044C', + "sol;": '\U0000002F', + "solb;": '\U000029C4', + "solbar;": '\U0000233F', + "sopf;": '\U0001D564', + "spades;": '\U00002660', + "spadesuit;": '\U00002660', + "spar;": '\U00002225', + "sqcap;": '\U00002293', + "sqcup;": '\U00002294', + "sqsub;": '\U0000228F', + "sqsube;": '\U00002291', + "sqsubset;": '\U0000228F', + "sqsubseteq;": '\U00002291', + "sqsup;": '\U00002290', + "sqsupe;": '\U00002292', + "sqsupset;": '\U00002290', + "sqsupseteq;": '\U00002292', + "squ;": '\U000025A1', + "square;": '\U000025A1', + "squarf;": '\U000025AA', + "squf;": '\U000025AA', + "srarr;": '\U00002192', + "sscr;": '\U0001D4C8', + "ssetmn;": '\U00002216', + "ssmile;": '\U00002323', + "sstarf;": '\U000022C6', + "star;": '\U00002606', + "starf;": '\U00002605', + "straightepsilon;": '\U000003F5', + "straightphi;": '\U000003D5', + "strns;": '\U000000AF', + "sub;": '\U00002282', + "subE;": '\U00002AC5', + "subdot;": '\U00002ABD', + "sube;": '\U00002286', + "subedot;": '\U00002AC3', + "submult;": '\U00002AC1', + "subnE;": '\U00002ACB', + "subne;": '\U0000228A', + "subplus;": '\U00002ABF', + "subrarr;": '\U00002979', + "subset;": '\U00002282', + "subseteq;": '\U00002286', + "subseteqq;": '\U00002AC5', + "subsetneq;": '\U0000228A', + "subsetneqq;": 
'\U00002ACB', + "subsim;": '\U00002AC7', + "subsub;": '\U00002AD5', + "subsup;": '\U00002AD3', + "succ;": '\U0000227B', + "succapprox;": '\U00002AB8', + "succcurlyeq;": '\U0000227D', + "succeq;": '\U00002AB0', + "succnapprox;": '\U00002ABA', + "succneqq;": '\U00002AB6', + "succnsim;": '\U000022E9', + "succsim;": '\U0000227F', + "sum;": '\U00002211', + "sung;": '\U0000266A', + "sup;": '\U00002283', + "sup1;": '\U000000B9', + "sup2;": '\U000000B2', + "sup3;": '\U000000B3', + "supE;": '\U00002AC6', + "supdot;": '\U00002ABE', + "supdsub;": '\U00002AD8', + "supe;": '\U00002287', + "supedot;": '\U00002AC4', + "suphsol;": '\U000027C9', + "suphsub;": '\U00002AD7', + "suplarr;": '\U0000297B', + "supmult;": '\U00002AC2', + "supnE;": '\U00002ACC', + "supne;": '\U0000228B', + "supplus;": '\U00002AC0', + "supset;": '\U00002283', + "supseteq;": '\U00002287', + "supseteqq;": '\U00002AC6', + "supsetneq;": '\U0000228B', + "supsetneqq;": '\U00002ACC', + "supsim;": '\U00002AC8', + "supsub;": '\U00002AD4', + "supsup;": '\U00002AD6', + "swArr;": '\U000021D9', + "swarhk;": '\U00002926', + "swarr;": '\U00002199', + "swarrow;": '\U00002199', + "swnwar;": '\U0000292A', + "szlig;": '\U000000DF', + "target;": '\U00002316', + "tau;": '\U000003C4', + "tbrk;": '\U000023B4', + "tcaron;": '\U00000165', + "tcedil;": '\U00000163', + "tcy;": '\U00000442', + "tdot;": '\U000020DB', + "telrec;": '\U00002315', + "tfr;": '\U0001D531', + "there4;": '\U00002234', + "therefore;": '\U00002234', + "theta;": '\U000003B8', + "thetasym;": '\U000003D1', + "thetav;": '\U000003D1', + "thickapprox;": '\U00002248', + "thicksim;": '\U0000223C', + "thinsp;": '\U00002009', + "thkap;": '\U00002248', + "thksim;": '\U0000223C', + "thorn;": '\U000000FE', + "tilde;": '\U000002DC', + "times;": '\U000000D7', + "timesb;": '\U000022A0', + "timesbar;": '\U00002A31', + "timesd;": '\U00002A30', + "tint;": '\U0000222D', + "toea;": '\U00002928', + "top;": '\U000022A4', + "topbot;": '\U00002336', + "topcir;": '\U00002AF1', + "topf;": '\U0001D565', + "topfork;": '\U00002ADA', + "tosa;": '\U00002929', + "tprime;": '\U00002034', + "trade;": '\U00002122', + "triangle;": '\U000025B5', + "triangledown;": '\U000025BF', + "triangleleft;": '\U000025C3', + "trianglelefteq;": '\U000022B4', + "triangleq;": '\U0000225C', + "triangleright;": '\U000025B9', + "trianglerighteq;": '\U000022B5', + "tridot;": '\U000025EC', + "trie;": '\U0000225C', + "triminus;": '\U00002A3A', + "triplus;": '\U00002A39', + "trisb;": '\U000029CD', + "tritime;": '\U00002A3B', + "trpezium;": '\U000023E2', + "tscr;": '\U0001D4C9', + "tscy;": '\U00000446', + "tshcy;": '\U0000045B', + "tstrok;": '\U00000167', + "twixt;": '\U0000226C', + "twoheadleftarrow;": '\U0000219E', + "twoheadrightarrow;": '\U000021A0', + "uArr;": '\U000021D1', + "uHar;": '\U00002963', + "uacute;": '\U000000FA', + "uarr;": '\U00002191', + "ubrcy;": '\U0000045E', + "ubreve;": '\U0000016D', + "ucirc;": '\U000000FB', + "ucy;": '\U00000443', + "udarr;": '\U000021C5', + "udblac;": '\U00000171', + "udhar;": '\U0000296E', + "ufisht;": '\U0000297E', + "ufr;": '\U0001D532', + "ugrave;": '\U000000F9', + "uharl;": '\U000021BF', + "uharr;": '\U000021BE', + "uhblk;": '\U00002580', + "ulcorn;": '\U0000231C', + "ulcorner;": '\U0000231C', + "ulcrop;": '\U0000230F', + "ultri;": '\U000025F8', + "umacr;": '\U0000016B', + "uml;": '\U000000A8', + "uogon;": '\U00000173', + "uopf;": '\U0001D566', + "uparrow;": '\U00002191', + "updownarrow;": '\U00002195', + "upharpoonleft;": '\U000021BF', + "upharpoonright;": '\U000021BE', + "uplus;": '\U0000228E', + 
"upsi;": '\U000003C5', + "upsih;": '\U000003D2', + "upsilon;": '\U000003C5', + "upuparrows;": '\U000021C8', + "urcorn;": '\U0000231D', + "urcorner;": '\U0000231D', + "urcrop;": '\U0000230E', + "uring;": '\U0000016F', + "urtri;": '\U000025F9', + "uscr;": '\U0001D4CA', + "utdot;": '\U000022F0', + "utilde;": '\U00000169', + "utri;": '\U000025B5', + "utrif;": '\U000025B4', + "uuarr;": '\U000021C8', + "uuml;": '\U000000FC', + "uwangle;": '\U000029A7', + "vArr;": '\U000021D5', + "vBar;": '\U00002AE8', + "vBarv;": '\U00002AE9', + "vDash;": '\U000022A8', + "vangrt;": '\U0000299C', + "varepsilon;": '\U000003F5', + "varkappa;": '\U000003F0', + "varnothing;": '\U00002205', + "varphi;": '\U000003D5', + "varpi;": '\U000003D6', + "varpropto;": '\U0000221D', + "varr;": '\U00002195', + "varrho;": '\U000003F1', + "varsigma;": '\U000003C2', + "vartheta;": '\U000003D1', + "vartriangleleft;": '\U000022B2', + "vartriangleright;": '\U000022B3', + "vcy;": '\U00000432', + "vdash;": '\U000022A2', + "vee;": '\U00002228', + "veebar;": '\U000022BB', + "veeeq;": '\U0000225A', + "vellip;": '\U000022EE', + "verbar;": '\U0000007C', + "vert;": '\U0000007C', + "vfr;": '\U0001D533', + "vltri;": '\U000022B2', + "vopf;": '\U0001D567', + "vprop;": '\U0000221D', + "vrtri;": '\U000022B3', + "vscr;": '\U0001D4CB', + "vzigzag;": '\U0000299A', + "wcirc;": '\U00000175', + "wedbar;": '\U00002A5F', + "wedge;": '\U00002227', + "wedgeq;": '\U00002259', + "weierp;": '\U00002118', + "wfr;": '\U0001D534', + "wopf;": '\U0001D568', + "wp;": '\U00002118', + "wr;": '\U00002240', + "wreath;": '\U00002240', + "wscr;": '\U0001D4CC', + "xcap;": '\U000022C2', + "xcirc;": '\U000025EF', + "xcup;": '\U000022C3', + "xdtri;": '\U000025BD', + "xfr;": '\U0001D535', + "xhArr;": '\U000027FA', + "xharr;": '\U000027F7', + "xi;": '\U000003BE', + "xlArr;": '\U000027F8', + "xlarr;": '\U000027F5', + "xmap;": '\U000027FC', + "xnis;": '\U000022FB', + "xodot;": '\U00002A00', + "xopf;": '\U0001D569', + "xoplus;": '\U00002A01', + "xotime;": '\U00002A02', + "xrArr;": '\U000027F9', + "xrarr;": '\U000027F6', + "xscr;": '\U0001D4CD', + "xsqcup;": '\U00002A06', + "xuplus;": '\U00002A04', + "xutri;": '\U000025B3', + "xvee;": '\U000022C1', + "xwedge;": '\U000022C0', + "yacute;": '\U000000FD', + "yacy;": '\U0000044F', + "ycirc;": '\U00000177', + "ycy;": '\U0000044B', + "yen;": '\U000000A5', + "yfr;": '\U0001D536', + "yicy;": '\U00000457', + "yopf;": '\U0001D56A', + "yscr;": '\U0001D4CE', + "yucy;": '\U0000044E', + "yuml;": '\U000000FF', + "zacute;": '\U0000017A', + "zcaron;": '\U0000017E', + "zcy;": '\U00000437', + "zdot;": '\U0000017C', + "zeetrf;": '\U00002128', + "zeta;": '\U000003B6', + "zfr;": '\U0001D537', + "zhcy;": '\U00000436', + "zigrarr;": '\U000021DD', + "zopf;": '\U0001D56B', + "zscr;": '\U0001D4CF', + "zwj;": '\U0000200D', + "zwnj;": '\U0000200C', + "AElig": '\U000000C6', + "AMP": '\U00000026', + "Aacute": '\U000000C1', + "Acirc": '\U000000C2', + "Agrave": '\U000000C0', + "Aring": '\U000000C5', + "Atilde": '\U000000C3', + "Auml": '\U000000C4', + "COPY": '\U000000A9', + "Ccedil": '\U000000C7', + "ETH": '\U000000D0', + "Eacute": '\U000000C9', + "Ecirc": '\U000000CA', + "Egrave": '\U000000C8', + "Euml": '\U000000CB', + "GT": '\U0000003E', + "Iacute": '\U000000CD', + "Icirc": '\U000000CE', + "Igrave": '\U000000CC', + "Iuml": '\U000000CF', + "LT": '\U0000003C', + "Ntilde": '\U000000D1', + "Oacute": '\U000000D3', + "Ocirc": '\U000000D4', + "Ograve": '\U000000D2', + "Oslash": '\U000000D8', + "Otilde": '\U000000D5', + "Ouml": '\U000000D6', + "QUOT": '\U00000022', + 
"REG": '\U000000AE', + "THORN": '\U000000DE', + "Uacute": '\U000000DA', + "Ucirc": '\U000000DB', + "Ugrave": '\U000000D9', + "Uuml": '\U000000DC', + "Yacute": '\U000000DD', + "aacute": '\U000000E1', + "acirc": '\U000000E2', + "acute": '\U000000B4', + "aelig": '\U000000E6', + "agrave": '\U000000E0', + "amp": '\U00000026', + "aring": '\U000000E5', + "atilde": '\U000000E3', + "auml": '\U000000E4', + "brvbar": '\U000000A6', + "ccedil": '\U000000E7', + "cedil": '\U000000B8', + "cent": '\U000000A2', + "copy": '\U000000A9', + "curren": '\U000000A4', + "deg": '\U000000B0', + "divide": '\U000000F7', + "eacute": '\U000000E9', + "ecirc": '\U000000EA', + "egrave": '\U000000E8', + "eth": '\U000000F0', + "euml": '\U000000EB', + "frac12": '\U000000BD', + "frac14": '\U000000BC', + "frac34": '\U000000BE', + "gt": '\U0000003E', + "iacute": '\U000000ED', + "icirc": '\U000000EE', + "iexcl": '\U000000A1', + "igrave": '\U000000EC', + "iquest": '\U000000BF', + "iuml": '\U000000EF', + "laquo": '\U000000AB', + "lt": '\U0000003C', + "macr": '\U000000AF', + "micro": '\U000000B5', + "middot": '\U000000B7', + "nbsp": '\U000000A0', + "not": '\U000000AC', + "ntilde": '\U000000F1', + "oacute": '\U000000F3', + "ocirc": '\U000000F4', + "ograve": '\U000000F2', + "ordf": '\U000000AA', + "ordm": '\U000000BA', + "oslash": '\U000000F8', + "otilde": '\U000000F5', + "ouml": '\U000000F6', + "para": '\U000000B6', + "plusmn": '\U000000B1', + "pound": '\U000000A3', + "quot": '\U00000022', + "raquo": '\U000000BB', + "reg": '\U000000AE', + "sect": '\U000000A7', + "shy": '\U000000AD', + "sup1": '\U000000B9', + "sup2": '\U000000B2', + "sup3": '\U000000B3', + "szlig": '\U000000DF', + "thorn": '\U000000FE', + "times": '\U000000D7', + "uacute": '\U000000FA', + "ucirc": '\U000000FB', + "ugrave": '\U000000F9', + "uml": '\U000000A8', + "uuml": '\U000000FC', + "yacute": '\U000000FD', + "yen": '\U000000A5', + "yuml": '\U000000FF', +} + +// HTML entities that are two unicode codepoints. +var entity2 = map[string][2]rune{ + // TODO(nigeltao): Handle replacements that are wider than their names. 
+ // "nLt;": {'\u226A', '\u20D2'}, + // "nGt;": {'\u226B', '\u20D2'}, + "NotEqualTilde;": {'\u2242', '\u0338'}, + "NotGreaterFullEqual;": {'\u2267', '\u0338'}, + "NotGreaterGreater;": {'\u226B', '\u0338'}, + "NotGreaterSlantEqual;": {'\u2A7E', '\u0338'}, + "NotHumpDownHump;": {'\u224E', '\u0338'}, + "NotHumpEqual;": {'\u224F', '\u0338'}, + "NotLeftTriangleBar;": {'\u29CF', '\u0338'}, + "NotLessLess;": {'\u226A', '\u0338'}, + "NotLessSlantEqual;": {'\u2A7D', '\u0338'}, + "NotNestedGreaterGreater;": {'\u2AA2', '\u0338'}, + "NotNestedLessLess;": {'\u2AA1', '\u0338'}, + "NotPrecedesEqual;": {'\u2AAF', '\u0338'}, + "NotRightTriangleBar;": {'\u29D0', '\u0338'}, + "NotSquareSubset;": {'\u228F', '\u0338'}, + "NotSquareSuperset;": {'\u2290', '\u0338'}, + "NotSubset;": {'\u2282', '\u20D2'}, + "NotSucceedsEqual;": {'\u2AB0', '\u0338'}, + "NotSucceedsTilde;": {'\u227F', '\u0338'}, + "NotSuperset;": {'\u2283', '\u20D2'}, + "ThickSpace;": {'\u205F', '\u200A'}, + "acE;": {'\u223E', '\u0333'}, + "bne;": {'\u003D', '\u20E5'}, + "bnequiv;": {'\u2261', '\u20E5'}, + "caps;": {'\u2229', '\uFE00'}, + "cups;": {'\u222A', '\uFE00'}, + "fjlig;": {'\u0066', '\u006A'}, + "gesl;": {'\u22DB', '\uFE00'}, + "gvertneqq;": {'\u2269', '\uFE00'}, + "gvnE;": {'\u2269', '\uFE00'}, + "lates;": {'\u2AAD', '\uFE00'}, + "lesg;": {'\u22DA', '\uFE00'}, + "lvertneqq;": {'\u2268', '\uFE00'}, + "lvnE;": {'\u2268', '\uFE00'}, + "nGg;": {'\u22D9', '\u0338'}, + "nGtv;": {'\u226B', '\u0338'}, + "nLl;": {'\u22D8', '\u0338'}, + "nLtv;": {'\u226A', '\u0338'}, + "nang;": {'\u2220', '\u20D2'}, + "napE;": {'\u2A70', '\u0338'}, + "napid;": {'\u224B', '\u0338'}, + "nbump;": {'\u224E', '\u0338'}, + "nbumpe;": {'\u224F', '\u0338'}, + "ncongdot;": {'\u2A6D', '\u0338'}, + "nedot;": {'\u2250', '\u0338'}, + "nesim;": {'\u2242', '\u0338'}, + "ngE;": {'\u2267', '\u0338'}, + "ngeqq;": {'\u2267', '\u0338'}, + "ngeqslant;": {'\u2A7E', '\u0338'}, + "nges;": {'\u2A7E', '\u0338'}, + "nlE;": {'\u2266', '\u0338'}, + "nleqq;": {'\u2266', '\u0338'}, + "nleqslant;": {'\u2A7D', '\u0338'}, + "nles;": {'\u2A7D', '\u0338'}, + "notinE;": {'\u22F9', '\u0338'}, + "notindot;": {'\u22F5', '\u0338'}, + "nparsl;": {'\u2AFD', '\u20E5'}, + "npart;": {'\u2202', '\u0338'}, + "npre;": {'\u2AAF', '\u0338'}, + "npreceq;": {'\u2AAF', '\u0338'}, + "nrarrc;": {'\u2933', '\u0338'}, + "nrarrw;": {'\u219D', '\u0338'}, + "nsce;": {'\u2AB0', '\u0338'}, + "nsubE;": {'\u2AC5', '\u0338'}, + "nsubset;": {'\u2282', '\u20D2'}, + "nsubseteqq;": {'\u2AC5', '\u0338'}, + "nsucceq;": {'\u2AB0', '\u0338'}, + "nsupE;": {'\u2AC6', '\u0338'}, + "nsupset;": {'\u2283', '\u20D2'}, + "nsupseteqq;": {'\u2AC6', '\u0338'}, + "nvap;": {'\u224D', '\u20D2'}, + "nvge;": {'\u2265', '\u20D2'}, + "nvgt;": {'\u003E', '\u20D2'}, + "nvle;": {'\u2264', '\u20D2'}, + "nvlt;": {'\u003C', '\u20D2'}, + "nvltrie;": {'\u22B4', '\u20D2'}, + "nvrtrie;": {'\u22B5', '\u20D2'}, + "nvsim;": {'\u223C', '\u20D2'}, + "race;": {'\u223D', '\u0331'}, + "smtes;": {'\u2AAC', '\uFE00'}, + "sqcaps;": {'\u2293', '\uFE00'}, + "sqcups;": {'\u2294', '\uFE00'}, + "varsubsetneq;": {'\u228A', '\uFE00'}, + "varsubsetneqq;": {'\u2ACB', '\uFE00'}, + "varsupsetneq;": {'\u228B', '\uFE00'}, + "varsupsetneqq;": {'\u2ACC', '\uFE00'}, + "vnsub;": {'\u2282', '\u20D2'}, + "vnsup;": {'\u2283', '\u20D2'}, + "vsubnE;": {'\u2ACB', '\uFE00'}, + "vsubne;": {'\u228A', '\uFE00'}, + "vsupnE;": {'\u2ACC', '\uFE00'}, + "vsupne;": {'\u228B', '\uFE00'}, +} diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/escape.go 
b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/escape.go new file mode 100644 index 00000000..d8561396 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/escape.go @@ -0,0 +1,258 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "bytes" + "strings" + "unicode/utf8" +) + +// These replacements permit compatibility with old numeric entities that +// assumed Windows-1252 encoding. +// https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference +var replacementTable = [...]rune{ + '\u20AC', // First entry is what 0x80 should be replaced with. + '\u0081', + '\u201A', + '\u0192', + '\u201E', + '\u2026', + '\u2020', + '\u2021', + '\u02C6', + '\u2030', + '\u0160', + '\u2039', + '\u0152', + '\u008D', + '\u017D', + '\u008F', + '\u0090', + '\u2018', + '\u2019', + '\u201C', + '\u201D', + '\u2022', + '\u2013', + '\u2014', + '\u02DC', + '\u2122', + '\u0161', + '\u203A', + '\u0153', + '\u009D', + '\u017E', + '\u0178', // Last entry is 0x9F. + // 0x00->'\uFFFD' is handled programmatically. + // 0x0D->'\u000D' is a no-op. +} + +// unescapeEntity reads an entity like "<" from b[src:] and writes the +// corresponding "<" to b[dst:], returning the incremented dst and src cursors. +// Precondition: b[src] == '&' && dst <= src. +// attribute should be true if parsing an attribute value. +func unescapeEntity(b []byte, dst, src int, attribute bool) (dst1, src1 int) { + // https://html.spec.whatwg.org/multipage/syntax.html#consume-a-character-reference + + // i starts at 1 because we already know that s[0] == '&'. + i, s := 1, b[src:] + + if len(s) <= 1 { + b[dst] = b[src] + return dst + 1, src + 1 + } + + if s[i] == '#' { + if len(s) <= 3 { // We need to have at least "&#.". + b[dst] = b[src] + return dst + 1, src + 1 + } + i++ + c := s[i] + hex := false + if c == 'x' || c == 'X' { + hex = true + i++ + } + + x := '\x00' + for i < len(s) { + c = s[i] + i++ + if hex { + if '0' <= c && c <= '9' { + x = 16*x + rune(c) - '0' + continue + } else if 'a' <= c && c <= 'f' { + x = 16*x + rune(c) - 'a' + 10 + continue + } else if 'A' <= c && c <= 'F' { + x = 16*x + rune(c) - 'A' + 10 + continue + } + } else if '0' <= c && c <= '9' { + x = 10*x + rune(c) - '0' + continue + } + if c != ';' { + i-- + } + break + } + + if i <= 3 { // No characters matched. + b[dst] = b[src] + return dst + 1, src + 1 + } + + if 0x80 <= x && x <= 0x9F { + // Replace characters from Windows-1252 with UTF-8 equivalents. + x = replacementTable[x-0x80] + } else if x == 0 || (0xD800 <= x && x <= 0xDFFF) || x > 0x10FFFF { + // Replace invalid characters with the replacement character. + x = '\uFFFD' + } + + return dst + utf8.EncodeRune(b[dst:], x), src + i + } + + // Consume the maximum number of characters possible, with the + // consumed characters matching one of the named references. + + for i < len(s) { + c := s[i] + i++ + // Lower-cased characters are more common in entities, so we check for them first. + if 'a' <= c && c <= 'z' || 'A' <= c && c <= 'Z' || '0' <= c && c <= '9' { + continue + } + if c != ';' { + i-- + } + break + } + + entityName := string(s[1:i]) + if entityName == "" { + // No-op. + } else if attribute && entityName[len(entityName)-1] != ';' && len(s) > i && s[i] == '=' { + // No-op. 
+ } else if x := entity[entityName]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + i + } else if x := entity2[entityName]; x[0] != 0 { + dst1 := dst + utf8.EncodeRune(b[dst:], x[0]) + return dst1 + utf8.EncodeRune(b[dst1:], x[1]), src + i + } else if !attribute { + maxLen := len(entityName) - 1 + if maxLen > longestEntityWithoutSemicolon { + maxLen = longestEntityWithoutSemicolon + } + for j := maxLen; j > 1; j-- { + if x := entity[entityName[:j]]; x != 0 { + return dst + utf8.EncodeRune(b[dst:], x), src + j + 1 + } + } + } + + dst1, src1 = dst+i, src+i + copy(b[dst:dst1], b[src:src1]) + return dst1, src1 +} + +// unescape unescapes b's entities in-place, so that "a<b" becomes "a': + esc = ">" + case '"': + // """ is shorter than """. + esc = """ + case '\r': + esc = " " + default: + panic("unrecognized escape character") + } + s = s[i+1:] + if _, err := w.WriteString(esc); err != nil { + return err + } + i = strings.IndexAny(s, escapedChars) + } + _, err := w.WriteString(s) + return err +} + +// EscapeString escapes special characters like "<" to become "<". It +// escapes only five such characters: <, >, &, ' and ". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func EscapeString(s string) string { + if strings.IndexAny(s, escapedChars) == -1 { + return s + } + var buf bytes.Buffer + escape(&buf, s) + return buf.String() +} + +// UnescapeString unescapes entities like "<" to become "<". It unescapes a +// larger range of entities than EscapeString escapes. For example, "á" +// unescapes to "á", as does "á" and "&xE1;". +// UnescapeString(EscapeString(s)) == s always holds, but the converse isn't +// always true. +func UnescapeString(s string) string { + for _, c := range s { + if c == '&' { + return string(unescape([]byte(s), false)) + } + } + return s +} diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/foreign.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/foreign.go new file mode 100644 index 00000000..d3b38440 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/foreign.go @@ -0,0 +1,226 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package html + +import ( + "strings" +) + +func adjustAttributeNames(aa []Attribute, nameMap map[string]string) { + for i := range aa { + if newName, ok := nameMap[aa[i].Key]; ok { + aa[i].Key = newName + } + } +} + +func adjustForeignAttributes(aa []Attribute) { + for i, a := range aa { + if a.Key == "" || a.Key[0] != 'x' { + continue + } + switch a.Key { + case "xlink:actuate", "xlink:arcrole", "xlink:href", "xlink:role", "xlink:show", + "xlink:title", "xlink:type", "xml:base", "xml:lang", "xml:space", "xmlns:xlink": + j := strings.Index(a.Key, ":") + aa[i].Namespace = a.Key[:j] + aa[i].Key = a.Key[j+1:] + } + } +} + +func htmlIntegrationPoint(n *Node) bool { + if n.Type != ElementNode { + return false + } + switch n.Namespace { + case "math": + if n.Data == "annotation-xml" { + for _, a := range n.Attr { + if a.Key == "encoding" { + val := strings.ToLower(a.Val) + if val == "text/html" || val == "application/xhtml+xml" { + return true + } + } + } + } + case "svg": + switch n.Data { + case "desc", "foreignObject", "title": + return true + } + } + return false +} + +func mathMLTextIntegrationPoint(n *Node) bool { + if n.Namespace != "math" { + return false + } + switch n.Data { + case "mi", "mo", "mn", "ms", "mtext": + return true + } + return false +} + +// Section 12.2.5.5. +var breakout = map[string]bool{ + "b": true, + "big": true, + "blockquote": true, + "body": true, + "br": true, + "center": true, + "code": true, + "dd": true, + "div": true, + "dl": true, + "dt": true, + "em": true, + "embed": true, + "h1": true, + "h2": true, + "h3": true, + "h4": true, + "h5": true, + "h6": true, + "head": true, + "hr": true, + "i": true, + "img": true, + "li": true, + "listing": true, + "menu": true, + "meta": true, + "nobr": true, + "ol": true, + "p": true, + "pre": true, + "ruby": true, + "s": true, + "small": true, + "span": true, + "strong": true, + "strike": true, + "sub": true, + "sup": true, + "table": true, + "tt": true, + "u": true, + "ul": true, + "var": true, +} + +// Section 12.2.5.5. 
+var svgTagNameAdjustments = map[string]string{ + "altglyph": "altGlyph", + "altglyphdef": "altGlyphDef", + "altglyphitem": "altGlyphItem", + "animatecolor": "animateColor", + "animatemotion": "animateMotion", + "animatetransform": "animateTransform", + "clippath": "clipPath", + "feblend": "feBlend", + "fecolormatrix": "feColorMatrix", + "fecomponenttransfer": "feComponentTransfer", + "fecomposite": "feComposite", + "feconvolvematrix": "feConvolveMatrix", + "fediffuselighting": "feDiffuseLighting", + "fedisplacementmap": "feDisplacementMap", + "fedistantlight": "feDistantLight", + "feflood": "feFlood", + "fefunca": "feFuncA", + "fefuncb": "feFuncB", + "fefuncg": "feFuncG", + "fefuncr": "feFuncR", + "fegaussianblur": "feGaussianBlur", + "feimage": "feImage", + "femerge": "feMerge", + "femergenode": "feMergeNode", + "femorphology": "feMorphology", + "feoffset": "feOffset", + "fepointlight": "fePointLight", + "fespecularlighting": "feSpecularLighting", + "fespotlight": "feSpotLight", + "fetile": "feTile", + "feturbulence": "feTurbulence", + "foreignobject": "foreignObject", + "glyphref": "glyphRef", + "lineargradient": "linearGradient", + "radialgradient": "radialGradient", + "textpath": "textPath", +} + +// Section 12.2.5.1 +var mathMLAttributeAdjustments = map[string]string{ + "definitionurl": "definitionURL", +} + +var svgAttributeAdjustments = map[string]string{ + "attributename": "attributeName", + "attributetype": "attributeType", + "basefrequency": "baseFrequency", + "baseprofile": "baseProfile", + "calcmode": "calcMode", + "clippathunits": "clipPathUnits", + "contentscripttype": "contentScriptType", + "contentstyletype": "contentStyleType", + "diffuseconstant": "diffuseConstant", + "edgemode": "edgeMode", + "externalresourcesrequired": "externalResourcesRequired", + "filterres": "filterRes", + "filterunits": "filterUnits", + "glyphref": "glyphRef", + "gradienttransform": "gradientTransform", + "gradientunits": "gradientUnits", + "kernelmatrix": "kernelMatrix", + "kernelunitlength": "kernelUnitLength", + "keypoints": "keyPoints", + "keysplines": "keySplines", + "keytimes": "keyTimes", + "lengthadjust": "lengthAdjust", + "limitingconeangle": "limitingConeAngle", + "markerheight": "markerHeight", + "markerunits": "markerUnits", + "markerwidth": "markerWidth", + "maskcontentunits": "maskContentUnits", + "maskunits": "maskUnits", + "numoctaves": "numOctaves", + "pathlength": "pathLength", + "patterncontentunits": "patternContentUnits", + "patterntransform": "patternTransform", + "patternunits": "patternUnits", + "pointsatx": "pointsAtX", + "pointsaty": "pointsAtY", + "pointsatz": "pointsAtZ", + "preservealpha": "preserveAlpha", + "preserveaspectratio": "preserveAspectRatio", + "primitiveunits": "primitiveUnits", + "refx": "refX", + "refy": "refY", + "repeatcount": "repeatCount", + "repeatdur": "repeatDur", + "requiredextensions": "requiredExtensions", + "requiredfeatures": "requiredFeatures", + "specularconstant": "specularConstant", + "specularexponent": "specularExponent", + "spreadmethod": "spreadMethod", + "startoffset": "startOffset", + "stddeviation": "stdDeviation", + "stitchtiles": "stitchTiles", + "surfacescale": "surfaceScale", + "systemlanguage": "systemLanguage", + "tablevalues": "tableValues", + "targetx": "targetX", + "targety": "targetY", + "textlength": "textLength", + "viewbox": "viewBox", + "viewtarget": "viewTarget", + "xchannelselector": "xChannelSelector", + "ychannelselector": "yChannelSelector", + "zoomandpan": "zoomAndPan", +} diff --git 
a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/node.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/node.go new file mode 100644 index 00000000..26b657ae --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/node.go @@ -0,0 +1,193 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "golang.org/x/net/html/atom" +) + +// A NodeType is the type of a Node. +type NodeType uint32 + +const ( + ErrorNode NodeType = iota + TextNode + DocumentNode + ElementNode + CommentNode + DoctypeNode + scopeMarkerNode +) + +// Section 12.2.3.3 says "scope markers are inserted when entering applet +// elements, buttons, object elements, marquees, table cells, and table +// captions, and are used to prevent formatting from 'leaking'". +var scopeMarker = Node{Type: scopeMarkerNode} + +// A Node consists of a NodeType and some Data (tag name for element nodes, +// content for text) and are part of a tree of Nodes. Element nodes may also +// have a Namespace and contain a slice of Attributes. Data is unescaped, so +// that it looks like "a 0 { + return (*s)[i-1] + } + return nil +} + +// index returns the index of the top-most occurrence of n in the stack, or -1 +// if n is not present. +func (s *nodeStack) index(n *Node) int { + for i := len(*s) - 1; i >= 0; i-- { + if (*s)[i] == n { + return i + } + } + return -1 +} + +// insert inserts a node at the given index. +func (s *nodeStack) insert(i int, n *Node) { + (*s) = append(*s, nil) + copy((*s)[i+1:], (*s)[i:]) + (*s)[i] = n +} + +// remove removes a node from the stack. It is a no-op if n is not present. +func (s *nodeStack) remove(n *Node) { + i := s.index(n) + if i == -1 { + return + } + copy((*s)[i:], (*s)[i+1:]) + j := len(*s) - 1 + (*s)[j] = nil + *s = (*s)[:j] +} diff --git a/src/stackdriver-nozzle/vendor/golang.org/x/net/html/parse.go b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/parse.go new file mode 100644 index 00000000..be4b2bf5 --- /dev/null +++ b/src/stackdriver-nozzle/vendor/golang.org/x/net/html/parse.go @@ -0,0 +1,2094 @@ +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package html + +import ( + "errors" + "fmt" + "io" + "strings" + + a "golang.org/x/net/html/atom" +) + +// A parser implements the HTML5 parsing algorithm: +// https://html.spec.whatwg.org/multipage/syntax.html#tree-construction +type parser struct { + // tokenizer provides the tokens for the parser. + tokenizer *Tokenizer + // tok is the most recently read token. + tok Token + // Self-closing tags like
are treated as start tags, except that + // hasSelfClosingToken is set while they are being processed. + hasSelfClosingToken bool + // doc is the document root element. + doc *Node + // The stack of open elements (section 12.2.3.2) and active formatting + // elements (section 12.2.3.3). + oe, afe nodeStack + // Element pointers (section 12.2.3.4). + head, form *Node + // Other parsing state flags (section 12.2.3.5). + scripting, framesetOK bool + // im is the current insertion mode. + im insertionMode + // originalIM is the insertion mode to go back to after completing a text + // or inTableText insertion mode. + originalIM insertionMode + // fosterParenting is whether new elements should be inserted according to + // the foster parenting rules (section 12.2.5.3). + fosterParenting bool + // quirks is whether the parser is operating in "quirks mode." + quirks bool + // fragment is whether the parser is parsing an HTML fragment. + fragment bool + // context is the context element when parsing an HTML fragment + // (section 12.4). + context *Node +} + +func (p *parser) top() *Node { + if n := p.oe.top(); n != nil { + return n + } + return p.doc +} + +// Stop tags for use in popUntil. These come from section 12.2.3.2. +var ( + defaultScopeStopTags = map[string][]a.Atom{ + "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, + "math": {a.AnnotationXml, a.Mi, a.Mn, a.Mo, a.Ms, a.Mtext}, + "svg": {a.Desc, a.ForeignObject, a.Title}, + } +) + +type scope int + +const ( + defaultScope scope = iota + listItemScope + buttonScope + tableScope + tableRowScope + tableBodyScope + selectScope +) + +// popUntil pops the stack of open elements at the highest element whose tag +// is in matchTags, provided there is no higher element in the scope's stop +// tags (as defined in section 12.2.3.2). It returns whether or not there was +// such an element. If there was not, popUntil leaves the stack unchanged. +// +// For example, the set of stop tags for table scope is: "html", "table". If +// the stack was: +// ["html", "body", "font", "table", "b", "i", "u"] +// then popUntil(tableScope, "font") would return false, but +// popUntil(tableScope, "i") would return true and the stack would become: +// ["html", "body", "font", "table", "b"] +// +// If an element's tag is in both the stop tags and matchTags, then the stack +// will be popped and the function returns true (provided, of course, there was +// no higher element in the stack that was also in the stop tags). For example, +// popUntil(tableScope, "table") returns true and leaves: +// ["html", "body", "font"] +func (p *parser) popUntil(s scope, matchTags ...a.Atom) bool { + if i := p.indexOfElementInScope(s, matchTags...); i != -1 { + p.oe = p.oe[:i] + return true + } + return false +} + +// indexOfElementInScope returns the index in p.oe of the highest element whose +// tag is in matchTags that is in scope. If no matching element is in scope, it +// returns -1. +func (p *parser) indexOfElementInScope(s scope, matchTags ...a.Atom) int { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + if p.oe[i].Namespace == "" { + for _, t := range matchTags { + if t == tagAtom { + return i + } + } + switch s { + case defaultScope: + // No-op. 
+ case listItemScope: + if tagAtom == a.Ol || tagAtom == a.Ul { + return -1 + } + case buttonScope: + if tagAtom == a.Button { + return -1 + } + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + return -1 + } + case selectScope: + if tagAtom != a.Optgroup && tagAtom != a.Option { + return -1 + } + default: + panic("unreachable") + } + } + switch s { + case defaultScope, listItemScope, buttonScope: + for _, t := range defaultScopeStopTags[p.oe[i].Namespace] { + if t == tagAtom { + return -1 + } + } + } + } + return -1 +} + +// elementInScope is like popUntil, except that it doesn't modify the stack of +// open elements. +func (p *parser) elementInScope(s scope, matchTags ...a.Atom) bool { + return p.indexOfElementInScope(s, matchTags...) != -1 +} + +// clearStackToContext pops elements off the stack of open elements until a +// scope-defined element is found. +func (p *parser) clearStackToContext(s scope) { + for i := len(p.oe) - 1; i >= 0; i-- { + tagAtom := p.oe[i].DataAtom + switch s { + case tableScope: + if tagAtom == a.Html || tagAtom == a.Table { + p.oe = p.oe[:i+1] + return + } + case tableRowScope: + if tagAtom == a.Html || tagAtom == a.Tr { + p.oe = p.oe[:i+1] + return + } + case tableBodyScope: + if tagAtom == a.Html || tagAtom == a.Tbody || tagAtom == a.Tfoot || tagAtom == a.Thead { + p.oe = p.oe[:i+1] + return + } + default: + panic("unreachable") + } + } +} + +// generateImpliedEndTags pops nodes off the stack of open elements as long as +// the top node has a tag name of dd, dt, li, option, optgroup, p, rp, or rt. +// If exceptions are specified, nodes with that name will not be popped off. +func (p *parser) generateImpliedEndTags(exceptions ...string) { + var i int +loop: + for i = len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if n.Type == ElementNode { + switch n.DataAtom { + case a.Dd, a.Dt, a.Li, a.Option, a.Optgroup, a.P, a.Rp, a.Rt: + for _, except := range exceptions { + if n.Data == except { + break loop + } + } + continue + } + } + break + } + + p.oe = p.oe[:i+1] +} + +// addChild adds a child node n to the top element, and pushes n onto the stack +// of open elements if it is an element node. +func (p *parser) addChild(n *Node) { + if p.shouldFosterParent() { + p.fosterParent(n) + } else { + p.top().AppendChild(n) + } + + if n.Type == ElementNode { + p.oe = append(p.oe, n) + } +} + +// shouldFosterParent returns whether the next node to be added should be +// foster parented. +func (p *parser) shouldFosterParent() bool { + if p.fosterParenting { + switch p.top().DataAtom { + case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr: + return true + } + } + return false +} + +// fosterParent adds a child node according to the foster parenting rules. +// Section 12.2.5.3, "foster parenting". +func (p *parser) fosterParent(n *Node) { + var table, parent, prev *Node + var i int + for i = len(p.oe) - 1; i >= 0; i-- { + if p.oe[i].DataAtom == a.Table { + table = p.oe[i] + break + } + } + + if table == nil { + // The foster parent is the html element. + parent = p.oe[0] + } else { + parent = table.Parent + } + if parent == nil { + parent = p.oe[i-1] + } + + if table != nil { + prev = table.PrevSibling + } else { + prev = parent.LastChild + } + if prev != nil && prev.Type == TextNode && n.Type == TextNode { + prev.Data += n.Data + return + } + + parent.InsertBefore(n, table) +} + +// addText adds text to the preceding node if it is a text node, or else it +// calls addChild with a new text node. 
+func (p *parser) addText(text string) { + if text == "" { + return + } + + if p.shouldFosterParent() { + p.fosterParent(&Node{ + Type: TextNode, + Data: text, + }) + return + } + + t := p.top() + if n := t.LastChild; n != nil && n.Type == TextNode { + n.Data += text + return + } + p.addChild(&Node{ + Type: TextNode, + Data: text, + }) +} + +// addElement adds a child element based on the current token. +func (p *parser) addElement() { + p.addChild(&Node{ + Type: ElementNode, + DataAtom: p.tok.DataAtom, + Data: p.tok.Data, + Attr: p.tok.Attr, + }) +} + +// Section 12.2.3.3. +func (p *parser) addFormattingElement() { + tagAtom, attr := p.tok.DataAtom, p.tok.Attr + p.addElement() + + // Implement the Noah's Ark clause, but with three per family instead of two. + identicalElements := 0 +findIdenticalElements: + for i := len(p.afe) - 1; i >= 0; i-- { + n := p.afe[i] + if n.Type == scopeMarkerNode { + break + } + if n.Type != ElementNode { + continue + } + if n.Namespace != "" { + continue + } + if n.DataAtom != tagAtom { + continue + } + if len(n.Attr) != len(attr) { + continue + } + compareAttributes: + for _, t0 := range n.Attr { + for _, t1 := range attr { + if t0.Key == t1.Key && t0.Namespace == t1.Namespace && t0.Val == t1.Val { + // Found a match for this attribute, continue with the next attribute. + continue compareAttributes + } + } + // If we get here, there is no attribute that matches a. + // Therefore the element is not identical to the new one. + continue findIdenticalElements + } + + identicalElements++ + if identicalElements >= 3 { + p.afe.remove(n) + } + } + + p.afe = append(p.afe, p.top()) +} + +// Section 12.2.3.3. +func (p *parser) clearActiveFormattingElements() { + for { + n := p.afe.pop() + if len(p.afe) == 0 || n.Type == scopeMarkerNode { + return + } + } +} + +// Section 12.2.3.3. +func (p *parser) reconstructActiveFormattingElements() { + n := p.afe.top() + if n == nil { + return + } + if n.Type == scopeMarkerNode || p.oe.index(n) != -1 { + return + } + i := len(p.afe) - 1 + for n.Type != scopeMarkerNode && p.oe.index(n) == -1 { + if i == 0 { + i = -1 + break + } + i-- + n = p.afe[i] + } + for { + i++ + clone := p.afe[i].clone() + p.addChild(clone) + p.afe[i] = clone + if i == len(p.afe)-1 { + break + } + } +} + +// Section 12.2.4. +func (p *parser) acknowledgeSelfClosingTag() { + p.hasSelfClosingToken = false +} + +// An insertion mode (section 12.2.3.1) is the state transition function from +// a particular state in the HTML5 parser's state machine. It updates the +// parser's fields depending on parser.tok (where ErrorToken means EOF). +// It returns whether the token was consumed. +type insertionMode func(*parser) bool + +// setOriginalIM sets the insertion mode to return to after completing a text or +// inTableText insertion mode. +// Section 12.2.3.1, "using the rules for". +func (p *parser) setOriginalIM() { + if p.originalIM != nil { + panic("html: bad parser state: originalIM was set twice") + } + p.originalIM = p.im +} + +// Section 12.2.3.1, "reset the insertion mode". 
+func (p *parser) resetInsertionMode() { + for i := len(p.oe) - 1; i >= 0; i-- { + n := p.oe[i] + if i == 0 && p.context != nil { + n = p.context + } + + switch n.DataAtom { + case a.Select: + p.im = inSelectIM + case a.Td, a.Th: + p.im = inCellIM + case a.Tr: + p.im = inRowIM + case a.Tbody, a.Thead, a.Tfoot: + p.im = inTableBodyIM + case a.Caption: + p.im = inCaptionIM + case a.Colgroup: + p.im = inColumnGroupIM + case a.Table: + p.im = inTableIM + case a.Head: + p.im = inBodyIM + case a.Body: + p.im = inBodyIM + case a.Frameset: + p.im = inFramesetIM + case a.Html: + p.im = beforeHeadIM + default: + continue + } + return + } + p.im = inBodyIM +} + +const whitespace = " \t\r\n\f" + +// Section 12.2.5.4.1. +func initialIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + n, quirks := parseDoctype(p.tok.Data) + p.doc.AppendChild(n) + p.quirks = quirks + p.im = beforeHTMLIM + return true + } + p.quirks = true + p.im = beforeHTMLIM + return false +} + +// Section 12.2.5.4.2. +func beforeHTMLIM(p *parser) bool { + switch p.tok.Type { + case DoctypeToken: + // Ignore the token. + return true + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + if p.tok.DataAtom == a.Html { + p.addElement() + p.im = beforeHeadIM + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.doc.AppendChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + } + p.parseImpliedToken(StartTagToken, a.Html, a.Html.String()) + return false +} + +// Section 12.2.5.4.3. +func beforeHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + p.tok.Data = strings.TrimLeft(p.tok.Data, whitespace) + if len(p.tok.Data) == 0 { + // It was all whitespace, so ignore it. + return true + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Head: + p.addElement() + p.head = p.top() + p.im = inHeadIM + return true + case a.Html: + return inBodyIM(p) + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head, a.Body, a.Html, a.Br: + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.5.4.4. +func inHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. 
+ p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta: + p.addElement() + p.oe.pop() + p.acknowledgeSelfClosingTag() + return true + case a.Script, a.Title, a.Noscript, a.Noframes, a.Style: + p.addElement() + p.setOriginalIM() + p.im = textIM + return true + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Head: + n := p.oe.pop() + if n.DataAtom != a.Head { + panic("html: bad parser state: element not found, in the in-head insertion mode") + } + p.im = afterHeadIM + return true + case a.Body, a.Html, a.Br: + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(EndTagToken, a.Head, a.Head.String()) + return false +} + +// Section 12.2.5.4.6. +func afterHeadIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + s := strings.TrimLeft(p.tok.Data, whitespace) + if len(s) < len(p.tok.Data) { + // Add the initial whitespace to the current node. + p.addText(p.tok.Data[:len(p.tok.Data)-len(s)]) + if s == "" { + return true + } + p.tok.Data = s + } + case StartTagToken: + switch p.tok.DataAtom { + case a.Html: + return inBodyIM(p) + case a.Body: + p.addElement() + p.framesetOK = false + p.im = inBodyIM + return true + case a.Frameset: + p.addElement() + p.im = inFramesetIM + return true + case a.Base, a.Basefont, a.Bgsound, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title: + p.oe = append(p.oe, p.head) + defer p.oe.remove(p.head) + return inHeadIM(p) + case a.Head: + // Ignore the token. + return true + } + case EndTagToken: + switch p.tok.DataAtom { + case a.Body, a.Html, a.Br: + // Drop down to creating an implied tag. + default: + // Ignore the token. + return true + } + case CommentToken: + p.addChild(&Node{ + Type: CommentNode, + Data: p.tok.Data, + }) + return true + case DoctypeToken: + // Ignore the token. + return true + } + + p.parseImpliedToken(StartTagToken, a.Body, a.Body.String()) + p.framesetOK = true + return false +} + +// copyAttributes copies attributes of src not found on dst to dst. +func copyAttributes(dst *Node, src Token) { + if len(src.Attr) == 0 { + return + } + attr := map[string]string{} + for _, t := range dst.Attr { + attr[t.Key] = t.Val + } + for _, t := range src.Attr { + if _, ok := attr[t.Key]; !ok { + dst.Attr = append(dst.Attr, t) + attr[t.Key] = t.Val + } + } +} + +// Section 12.2.5.4.7. +func inBodyIM(p *parser) bool { + switch p.tok.Type { + case TextToken: + d := p.tok.Data + switch n := p.oe.top(); n.DataAtom { + case a.Pre, a.Listing: + if n.FirstChild == nil { + // Ignore a newline at the start of a
<pre> block.
+				if d != "" && d[0] == '\r' {
+					d = d[1:]
+				}
+				if d != "" && d[0] == '\n' {
+					d = d[1:]
+				}
+			}
+		}
+		d = strings.Replace(d, "\x00", "", -1)
+		if d == "" {
+			return true
+		}
+		p.reconstructActiveFormattingElements()
+		p.addText(d)
+		if p.framesetOK && strings.TrimLeft(d, whitespace) != "" {
+			// There were non-whitespace characters inserted.
+			p.framesetOK = false
+		}
+	case StartTagToken:
+		switch p.tok.DataAtom {
+		case a.Html:
+			copyAttributes(p.oe[0], p.tok)
+		case a.Base, a.Basefont, a.Bgsound, a.Command, a.Link, a.Meta, a.Noframes, a.Script, a.Style, a.Title:
+			return inHeadIM(p)
+		case a.Body:
+			if len(p.oe) >= 2 {
+				body := p.oe[1]
+				if body.Type == ElementNode && body.DataAtom == a.Body {
+					p.framesetOK = false
+					copyAttributes(body, p.tok)
+				}
+			}
+		case a.Frameset:
+			if !p.framesetOK || len(p.oe) < 2 || p.oe[1].DataAtom != a.Body {
+				// Ignore the token.
+				return true
+			}
+			body := p.oe[1]
+			if body.Parent != nil {
+				body.Parent.RemoveChild(body)
+			}
+			p.oe = p.oe[:1]
+			p.addElement()
+			p.im = inFramesetIM
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(buttonScope, a.P)
+			switch n := p.top(); n.DataAtom {
+			case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+				p.oe.pop()
+			}
+			p.addElement()
+		case a.Pre, a.Listing:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			// The newline, if any, will be dealt with by the TextToken case.
+			p.framesetOK = false
+		case a.Form:
+			if p.form == nil {
+				p.popUntil(buttonScope, a.P)
+				p.addElement()
+				p.form = p.top()
+			}
+		case a.Li:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Li:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Dd, a.Dt:
+			p.framesetOK = false
+			for i := len(p.oe) - 1; i >= 0; i-- {
+				node := p.oe[i]
+				switch node.DataAtom {
+				case a.Dd, a.Dt:
+					p.oe = p.oe[:i]
+				case a.Address, a.Div, a.P:
+					continue
+				default:
+					if !isSpecialElement(node) {
+						continue
+					}
+				}
+				break
+			}
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Plaintext:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+		case a.Button:
+			p.popUntil(defaultScope, a.Button)
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+		case a.A:
+			for i := len(p.afe) - 1; i >= 0 && p.afe[i].Type != scopeMarkerNode; i-- {
+				if n := p.afe[i]; n.Type == ElementNode && n.DataAtom == a.A {
+					p.inBodyEndTagFormatting(a.A)
+					p.oe.remove(n)
+					p.afe.remove(n)
+					break
+				}
+			}
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.reconstructActiveFormattingElements()
+			p.addFormattingElement()
+		case a.Nobr:
+			p.reconstructActiveFormattingElements()
+			if p.elementInScope(defaultScope, a.Nobr) {
+				p.inBodyEndTagFormatting(a.Nobr)
+				p.reconstructActiveFormattingElements()
+			}
+			p.addFormattingElement()
+		case a.Applet, a.Marquee, a.Object:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.afe = append(p.afe, &scopeMarker)
+			p.framesetOK = false
+		case a.Table:
+			if !p.quirks {
+				p.popUntil(buttonScope, a.P)
+			}
+			p.addElement()
+			p.framesetOK = false
+			p.im = inTableIM
+			return true
+		case a.Area, a.Br, a.Embed, a.Img, a.Input, a.Keygen, a.Wbr:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			if p.tok.DataAtom == a.Input {
+				for _, t := range p.tok.Attr {
+					if t.Key == "type" {
+						if strings.ToLower(t.Val) == "hidden" {
+							// Skip setting framesetOK = false
+							return true
+						}
+					}
+				}
+			}
+			p.framesetOK = false
+		case a.Param, a.Source, a.Track:
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+		case a.Hr:
+			p.popUntil(buttonScope, a.P)
+			p.addElement()
+			p.oe.pop()
+			p.acknowledgeSelfClosingTag()
+			p.framesetOK = false
+		case a.Image:
+			p.tok.DataAtom = a.Img
+			p.tok.Data = a.Img.String()
+			return false
+		case a.Isindex:
+			if p.form != nil {
+				// Ignore the token.
+				return true
+			}
+			action := ""
+			prompt := "This is a searchable index. Enter search keywords: "
+			attr := []Attribute{{Key: "name", Val: "isindex"}}
+			for _, t := range p.tok.Attr {
+				switch t.Key {
+				case "action":
+					action = t.Val
+				case "name":
+					// Ignore the attribute.
+				case "prompt":
+					prompt = t.Val
+				default:
+					attr = append(attr, t)
+				}
+			}
+			p.acknowledgeSelfClosingTag()
+			p.popUntil(buttonScope, a.P)
+			p.parseImpliedToken(StartTagToken, a.Form, a.Form.String())
+			if action != "" {
+				p.form.Attr = []Attribute{{Key: "action", Val: action}}
+			}
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(StartTagToken, a.Label, a.Label.String())
+			p.addText(prompt)
+			p.addChild(&Node{
+				Type:     ElementNode,
+				DataAtom: a.Input,
+				Data:     a.Input.String(),
+				Attr:     attr,
+			})
+			p.oe.pop()
+			p.parseImpliedToken(EndTagToken, a.Label, a.Label.String())
+			p.parseImpliedToken(StartTagToken, a.Hr, a.Hr.String())
+			p.parseImpliedToken(EndTagToken, a.Form, a.Form.String())
+		case a.Textarea:
+			p.addElement()
+			p.setOriginalIM()
+			p.framesetOK = false
+			p.im = textIM
+		case a.Xmp:
+			p.popUntil(buttonScope, a.P)
+			p.reconstructActiveFormattingElements()
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Iframe:
+			p.framesetOK = false
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Noembed, a.Noscript:
+			p.addElement()
+			p.setOriginalIM()
+			p.im = textIM
+		case a.Select:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+			p.framesetOK = false
+			p.im = inSelectIM
+			return true
+		case a.Optgroup, a.Option:
+			if p.top().DataAtom == a.Option {
+				p.oe.pop()
+			}
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		case a.Rp, a.Rt:
+			if p.elementInScope(defaultScope, a.Ruby) {
+				p.generateImpliedEndTags()
+			}
+			p.addElement()
+		case a.Math, a.Svg:
+			p.reconstructActiveFormattingElements()
+			if p.tok.DataAtom == a.Math {
+				adjustAttributeNames(p.tok.Attr, mathMLAttributeAdjustments)
+			} else {
+				adjustAttributeNames(p.tok.Attr, svgAttributeAdjustments)
+			}
+			adjustForeignAttributes(p.tok.Attr)
+			p.addElement()
+			p.top().Namespace = p.tok.Data
+			if p.hasSelfClosingToken {
+				p.oe.pop()
+				p.acknowledgeSelfClosingTag()
+			}
+			return true
+		case a.Caption, a.Col, a.Colgroup, a.Frame, a.Head, a.Tbody, a.Td, a.Tfoot, a.Th, a.Thead, a.Tr:
+			// Ignore the token.
+		default:
+			p.reconstructActiveFormattingElements()
+			p.addElement()
+		}
+	case EndTagToken:
+		switch p.tok.DataAtom {
+		case a.Body:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.im = afterBodyIM
+			}
+		case a.Html:
+			if p.elementInScope(defaultScope, a.Body) {
+				p.parseImpliedToken(EndTagToken, a.Body, a.Body.String())
+				return false
+			}
+			return true
+		case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.Form:
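+			// A </form> end tag only takes effect when the form element pointer
+			// matches a <form> that is in scope; either way the pointer is
+			// cleared.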
+			node := p.form
+			p.form = nil
+			i := p.indexOfElementInScope(defaultScope, a.Form)
+			if node == nil || i == -1 || p.oe[i] != node {
+				// Ignore the token.
+				return true
+			}
+			p.generateImpliedEndTags()
+			p.oe.remove(node)
+		case a.P:
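+			// A </p> end tag with no <p> in button scope first synthesizes an
+			// implied <p> start tag so that there is an element to pop.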
+			if !p.elementInScope(buttonScope, a.P) {
+				p.parseImpliedToken(StartTagToken, a.P, a.P.String())
+			}
+			p.popUntil(buttonScope, a.P)
+		case a.Li:
+			p.popUntil(listItemScope, a.Li)
+		case a.Dd, a.Dt:
+			p.popUntil(defaultScope, p.tok.DataAtom)
+		case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
+			p.popUntil(defaultScope, a.H1, a.H2, a.H3, a.H4, a.H5, a.H6)
+		case a.A, a.B, a.Big, a.Code, a.Em, a.Font, a.I, a.Nobr, a.S, a.Small, a.Strike, a.Strong, a.Tt, a.U:
+			p.inBodyEndTagFormatting(p.tok.DataAtom)
+		case a.Applet, a.Marquee, a.Object:
+			if p.popUntil(defaultScope, p.tok.DataAtom) {
+				p.clearActiveFormattingElements()
+			}
+		case a.Br:
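+			// A </br> end tag is a parse error; treat it as a <br> start tag and
+			// reprocess the token.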
+			p.tok.Type = StartTagToken
+			return false
+		default:
+			p.inBodyEndTagOther(p.tok.DataAtom)
+		}
+	case CommentToken:
+		p.addChild(&Node{
+			Type: CommentNode,
+			Data: p.tok.Data,
+		})
+	}
+
+	return true
+}
+
+func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
+	// This is the "adoption agency" algorithm, described at
+	// https://html.spec.whatwg.org/multipage/syntax.html#adoptionAgency
+
+	// TODO: this is a fairly literal line-by-line translation of that algorithm.
+	// Once the code successfully parses the comprehensive test suite, we should
+	// refactor this code to be more idiomatic.
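+	//
+	// As a rough illustration: mis-nested markup such as <b>1<p>2</b>3</p> is
+	// repaired by cloning the <b> element, so the resulting tree is equivalent
+	// to <b>1</b><p><b>2</b>3</p>.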
+
+	// Steps 1-4. The outer loop.
+	for i := 0; i < 8; i++ {
+		// Step 5. Find the formatting element.
+		var formattingElement *Node
+		for j := len(p.afe) - 1; j >= 0; j-- {
+			if p.afe[j].Type == scopeMarkerNode {
+				break
+			}
+			if p.afe[j].DataAtom == tagAtom {
+				formattingElement = p.afe[j]
+				break
+			}
+		}
+		if formattingElement == nil {
+			p.inBodyEndTagOther(tagAtom)
+			return
+		}
+		feIndex := p.oe.index(formattingElement)
+		if feIndex == -1 {
+			p.afe.remove(formattingElement)
+			return
+		}
+		if !p.elementInScope(defaultScope, tagAtom) {
+			// Ignore the tag.
+			return
+		}
+
+		// Steps 9-10. Find the furthest block.
+		var furthestBlock *Node
+		for _, e := range p.oe[feIndex:] {
+			if isSpecialElement(e) {
+				furthestBlock = e
+				break
+			}
+		}
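+		// No furthest block: the simple case. Pop the stack up to and including
+		// the formatting element and drop it from the active formatting list.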
+		if furthestBlock == nil {
+			e := p.oe.pop()
+			for e != formattingElement {
+				e = p.oe.pop()
+			}
+			p.afe.remove(e)
+			return
+		}
+
+		// Steps 11-12. Find the common ancestor and bookmark node.
+		commonAncestor := p.oe[feIndex-1]
+		bookmark := p.afe.index(formattingElement)
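+		// commonAncestor is the element immediately above formattingElement on
+		// the stack of open elements; bookmark records where the clone will be
+		// re-inserted into the active formatting elements list in step 18.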
+
+		// Step 13. The inner loop. Find the lastNode to reparent.
+		lastNode := furthestBlock
+		node := furthestBlock
+		x := p.oe.index(node)
+		// Steps 13.1-13.2.
+		for j := 0; j < 3; j++ {
+			// Step 13.3.
+			x--
+			node = p.oe[x]
+			// Steps 13.4-13.5.
+			if p.afe.index(node) == -1 {
+				p.oe.remove(node)
+				continue
+			}
+			// Step 13.6.
+			if node == formattingElement {
+				break
+			}
+			// Step 13.7.
+			clone := node.clone()
+			p.afe[p.afe.index(node)] = clone
+			p.oe[p.oe.index(node)] = clone
+			node = clone
+			// Step 13.8.
+			if lastNode == furthestBlock {
+				bookmark = p.afe.index(node) + 1
+			}
+			// Step 13.9.
+			if lastNode.Parent != nil {
+				lastNode.Parent.RemoveChild(lastNode)
+			}
+			node.AppendChild(lastNode)
+			// Step 13.10.
+			lastNode = node
+		}
+
+		// Step 14. Reparent lastNode to the common ancestor,
+		// or for misnested table nodes, to the foster parent.
+		if lastNode.Parent != nil {
+			lastNode.Parent.RemoveChild(lastNode)
+		}
+		switch commonAncestor.DataAtom {
+		case a.Table, a.Tbody, a.Tfoot, a.Thead, a.Tr:
+			p.fosterParent(lastNode)
+		default:
+			commonAncestor.AppendChild(lastNode)
+		}
+
+		// Steps 15-17. Reparent nodes from the furthest block's children
+		// to a clone of the formatting element.
+		clone := formattingElement.clone()
+		reparentChildren(clone, furthestBlock)
+		furthestBlock.AppendChild(clone)
+
+		// Step 18. Fix up the list of active formatting elements.
+		if oldLoc := p.afe.index(formattingElement); oldLoc != -1 && oldLoc < bookmark {
+			// Move the bookmark with the rest of the list.
+			bookmark--
+		}
+		p.afe.remove(formattingElement)
+		p.afe.insert(bookmark, clone)
+
+		// Step 19. Fix up the stack of open elements.
+		p.oe.remove(formattingElement)
+		p.oe.insert(p.oe.index(furthestBlock)+1, clone)
+	}
+}
+
+// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
+// This is also the "any other end tag" handling from Section 12.2.5.5,
+// "The rules for parsing tokens in foreign content":
+// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
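+//
+// Walking up from the current node, it pops the stack of open elements up to
+// and including the first element matching tagAtom; if a special element is
+// found first, the end tag is ignored.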
+func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
+	for i := len(p.oe) - 1; i >= 0; i-- {
+		if p.oe[i].DataAtom == tagAtom {
+			p.oe = p.oe[:i]
+			break
+		}
+		if isSpecialElement(p.oe[i]) {
+			break
+		}
+	}
+}
+
+// Section 12.2.5.4.8.
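+// textIM is the "text" insertion mode. inBodyIM switches to it for raw-text
+// style elements such as <textarea>, <xmp>, <iframe>, <noembed> and
+// <noscript>, after setOriginalIM has recorded the insertion mode to restore
+// once the element's text content ends.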
+func textIM(p *parser) bool {
+	switch p.tok.Type {
+	case ErrorToken:
+		p.oe.pop()
+	case TextToken:
+		d := p.tok.Data
+		if n := p.oe.top(); n.DataAtom == a.Textarea && n.FirstChild == nil {
+			// Ignore a newline at the start of a