diff --git a/Gopkg.lock b/Gopkg.lock index ea18540cd4..9f6565ebce 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -26,25 +26,29 @@ revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] - digest = "1:902544577dcb868a5ae31529d73a1ce5031e224923290caedf6176241ee304e0" + digest = "1:f2d3857204e90f618f155eef24533ac205b1ce7b3570e21502ad0f0c2f6c288e" name = "github.com/cloudevents/sdk-go" packages = [ - ".", - "pkg/cloudevents", - "pkg/cloudevents/client", - "pkg/cloudevents/context", - "pkg/cloudevents/datacodec", - "pkg/cloudevents/datacodec/json", - "pkg/cloudevents/datacodec/text", - "pkg/cloudevents/datacodec/xml", - "pkg/cloudevents/observability", - "pkg/cloudevents/transport", - "pkg/cloudevents/transport/http", - "pkg/cloudevents/types", + "v2", + "v2/binding", + "v2/binding/format", + "v2/binding/spec", + "v2/client", + "v2/context", + "v2/event", + "v2/event/datacodec", + "v2/event/datacodec/json", + "v2/event/datacodec/text", + "v2/event/datacodec/xml", + "v2/extensions", + "v2/observability", + "v2/protocol", + "v2/protocol/http", + "v2/types", ] pruneopts = "NUT" - revision = "2fa4bb1fbb4aac4d906b0173a2a408f701439b82" - version = "v0.10.0" + revision = "6dc020a8df7f3ee38d729e53cde2193ea7edf12a" + version = "v2.0.0-preview8" [[projects]] digest = "1:7a6852b35eb5bbc184561443762d225116ae630c26a7c4d90546619f1e7d2ad2" @@ -143,6 +147,14 @@ revision = "e3702bed27f0d39777b0b37b664b6280e8ef8fbf" version = "v1.6.2" +[[projects]] + digest = "1:7fae9ec96d10b2afce0da23c378c8b3389319b7f92fa092f2621bba3078cfb4b" + name = "github.com/hashicorp/golang-lru" + packages = ["simplelru"] + pruneopts = "NUT" + revision = "14eae340515388ca95aa8e7b86f0de668e981f54" + version = "v0.5.4" + [[projects]] digest = "1:08c58ac78a8c1f61e9a96350066d30fe194b8779799bd932a79932a5166a173f" name = "github.com/kelseyhightower/envconfig" @@ -163,6 +175,17 @@ pruneopts = "NUT" revision = "e8a4306a5d37d2ea18705dbd592ecf1fa9264191" +[[projects]] + branch = "master" + digest = "1:a12508addb76a16593d8a3cee41d782fdf738f727d421f0bcc2dd2ee76821c01" + name = "github.com/lightstep/tracecontext.go" + packages = [ + "traceparent", + "tracestate", + ] + pruneopts = "NUT" + revision = "1757c391b1acf4147823503f13e003115ea4e5df" + [[projects]] digest = "1:5985ef4caf91ece5d54817c11ea25f182697534f8ae6521eadcd628c142ac4b6" name = "github.com/matttproud/golang_protobuf_extensions" @@ -186,6 +209,14 @@ revision = "f197ec29e729f226d23370ea60f0e49b8f44ccf4" version = "v0.1.0" +[[projects]] + digest = "1:4047c378584616813d610c9f993bf90dd0d07aed8d94bd3bc299cd35ececdcba" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "NUT" + revision = "614d223910a179a466c1767a985424175c39b465" + version = "v0.9.1" + [[projects]] digest = "1:03bca087b180bf24c4f9060775f137775550a0834e18f0bca0520a868679dbd7" name = "github.com/prometheus/client_golang" @@ -247,7 +278,7 @@ version = "v1.0.0" [[projects]] - digest = "1:bb38c0571e5ffeb394f2a7e4056fa5a7a6ea1acabb7fe71976340719fd104d02" + digest = "1:2fe273976b8123b7fcd5d49a7ecbf340b92f370a727bed427ded821f68584d63" name = "go.opencensus.io" packages = [ ".", @@ -255,8 +286,12 @@ "exporter/zipkin", "internal", "internal/tagencoding", + "metric/metricdata", + "metric/metricproducer", "plugin/ochttp", "plugin/ochttp/propagation/b3", + "plugin/ochttp/propagation/tracecontext", + "resource", "stats", "stats/internal", "stats/view", @@ -264,10 +299,11 @@ "trace", "trace/internal", "trace/propagation", + "trace/tracestate", ] pruneopts = "NUT" - 
revision = "e262766cd0d230a1bb7c37281e345e465f19b41b" - version = "v0.14.0" + revision = "75c0cca22312e51bfd4fafdbe9197ae399e18b38" + version = "v0.20.2" [[projects]] digest = "1:cc9d86ec4e6e3bdf87e3a421273bfeed003cf8e21351c0302fe8b0eb7b10efe6" @@ -491,7 +527,7 @@ analyzer-version = 1 input-imports = [ "cloud.google.com/go/storage", - "github.com/cloudevents/sdk-go", + "github.com/cloudevents/sdk-go/v2", "github.com/dgrijalva/jwt-go", "github.com/eclipse/paho.mqtt.golang", "github.com/golang/protobuf/proto", diff --git a/Gopkg.toml b/Gopkg.toml index 031afa695b..58e20ede5a 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -14,3 +14,7 @@ required = [ [[prune.project]] name = "knative.dev/test-infra" non-go = false + +[[constraint]] + name = "github.com/cloudevents/sdk-go" + version = "v2.0.0-RC1" diff --git a/community/samples/README.md b/community/samples/README.md index c3aa58104f..84c1a84d5f 100644 --- a/community/samples/README.md +++ b/community/samples/README.md @@ -16,6 +16,7 @@ Knative Serving sample apps. | Sample Name | Description | Language(s) | | ----------- | ----------- | ----------- | | Hello World | A quick introduction to Knative Serving that highlights how to deploy an app. | [Clojure](./serving/helloworld-clojure/README.md), [Dart](./serving/helloworld-dart/README.md), [Elixir](./serving/helloworld-elixir/README.md), [Haskell](./serving/helloworld-haskell/README.md), [Java - Micronaut](./serving/helloworld-java-micronaut/README.md), [Java - Quarkus](./serving/helloworld-java-quarkus/README.md), [R - Go Server](./serving/helloworld-r/README.md), [Rust](./serving/helloworld-rust/README.md), [Swift](./serving/helloworld-swift/README.md), [Vertx](./serving/helloworld-vertx/README.md) | +| Machine Learning | A quick introduction to using Knative Serving to serve machine learning models | [Python - BentoML](./serving/machinelearning-python-bentoml) #### Eventing and Eventing Resources samples diff --git a/community/samples/serving/machinelearning-python-bentoml/README.md b/community/samples/serving/machinelearning-python-bentoml/README.md new file mode 100644 index 0000000000..6c867e0ab7 --- /dev/null +++ b/community/samples/serving/machinelearning-python-bentoml/README.md @@ -0,0 +1,193 @@ +--- +title: "Hello World - Python BentoML" +linkTitle: "Python Bentoml" +weight: 1 +type: "docs" +--- + +A simple machine learning model with API serving that is written in python and +using [BentoML](https://github.com/bentoml/BentoML). BentoML is an open source +framework for high performance ML model serving, which supports all major machine +learning frameworks including Keras, Tensorflow, PyTorch, Fast.ai, XGBoost and etc. + +This sample will walk you through the steps of creating and deploying a machine learning +model using python. It will use BentoML to package a classifier model trained +on the Iris dataset. Afterward, it will create a container image and +deploy the image to Knative. + +Knative deployment guide with BentoML is also available in the +[BentoML documentation](https://docs.bentoml.org/en/latest/deployment/knative.html) + +## Before you begin + +- A Kubernetes cluster with Knative installed. Follow the + [installation instructions](../../../../docs/install/README.md) if you need to + create one. +- [Docker](https://www.docker.com) installed and running on your local machine, + and a Docker Hub account configured. Docker Hub will be used for a container registry). 
+- Python 3.6 or above installed and running on your local machine. + - Install `scikit-learn` and `bentoml` packages: + + ```shell + pip install scikit-learn + pip install bentoml + ``` + +## Recreating sample code + +Run the following code on your local machine, to train a machine learning model and deploy it +as API endpoint with KNative Serving. + +1. BentoML creates a model API server, via prediction service abstraction. In + `iris_classifier.py`, it defines a prediction service that requires a scikit-learn + model, asks BentoML to figure out the required pip dependencies, also defines an + API, which is the entry point for accessing this machine learning service. + + {{% readfile file="iris_classifier.py" %}} + +2. In `main.py`, it uses the classic + [iris flower data set](https://en.wikipedia.org/wiki/Iris_flower_data_set) + to train a classification model which can predict the species of an iris flower with + given data and then save the model with BentoML to local disk. + + {{% readfile file="main.py" %}} + + Run the `main.py` file to train and save the model: + + ```shell + python main.py + ``` + +3. Use BentoML CLI to check saved model's information. + + ```shell + bentoml get IrisClassifier:latest + ``` + + Example: + + ```shell + > bentoml get IrisClassifier:latest + { + "name": "IrisClassifier", + "version": "20200305171229_0A1411", + "uri": { + "type": "LOCAL", + "uri": "/Users/bozhaoyu/bentoml/repository/IrisClassifier/20200305171229_0A1411" + }, + "bentoServiceMetadata": { + "name": "IrisClassifier", + "version": "20200305171229_0A1411", + "createdAt": "2020-03-06T01:12:49.431011Z", + "env": { + "condaEnv": "name: bentoml-IrisClassifier\nchannels:\n- defaults\ndependencies:\n- python=3.7.3\n- pip\n", + "pipDependencies": "bentoml==0.6.2\nscikit-learn", + "pythonVersion": "3.7.3" + }, + "artifacts": [ + { + "name": "model", + "artifactType": "SklearnModelArtifact" + } + ], + "apis": [ + { + "name": "predict", + "handlerType": "DataframeHandler", + "docs": "BentoService API", + "handlerConfig": { + "orient": "records", + "typ": "frame", + "input_dtypes": null, + "output_orient": "records" + } + } + ] + } + } + ``` + +4. Test run API server. BentoML can start an API server from the saved model. Use + BentoML CLI command to start an API server locally and test it with the `curl` command. + + ```shell + bentoml serve IrisClassifier:latest + ``` + + In another terminal window, make `curl` request with sample data to the API server + and get prediction result: + + ```shell + curl -v -i \ + --header "Content-Type: application/json" \ + --request POST \ + --data '[[5.1, 3.5, 1.4, 0.2]]' \ + 127.0.0.1:5000/predict + ``` + +## Building and deploying the sample + +BentoML supports creating an API server docker image from its saved model directory, where +a Dockerfile is automatically generated when saving the model. + +1. To build an API model server docker image, replace `{username}` with your Docker Hub + username and run the following commands. + + ```shell + # jq might not be installed on your local system, please follow jq install + # instruction at https://stedolan.github.io/jq/download/ + saved_path=$(bentoml get IrisClassifier:latest -q | jq -r ".uri.uri") + + # Build the container on your local machine + docker build - t {username}/iris-classifier $saved_path + + # Push the container to docker registry + docker push {username}/iris-classifier + ``` + +2. 
In `service.yaml`, replace `{username}` with your Docker hub username, and then deploy + the service to Knative Serving with `kubectl`: + + {{% readfile file="service.yaml" %}} + + ```shell + kubectl apply --filename service.yaml + ``` + +3. Now that your service is created, Knative performs the following steps: + + - Create a new immutable revision for this version of the app. + - Network programming to create a route, ingress, service, and load + balance for your application. + - Automatically scale your pods up and down (including to zero active + pods). + +4. Run the following command to find the domain URL for your service: + + ```shell + kubectl get ksvc iris-classifier --output=custom-columns=NAME:.metadata.name,URL:.status.url + + NAME URL + iris-classifier http://iris-classifer.default.example.com + ``` + +5. Replace the request URL with the URL return in the previous command, and execute the + command to get prediction result from the deployed model API endpoint. + + ```shell + curl -v -i \ + --header "Content-Type: application/json" \ + --request POST \ + --data '[[5.1, 3.5, 1.4, 0.2]]' \ + http://iris-classifier.default.example.com/predict + + [0] + ``` + +## Removing the sample app deployment + +To remove the application from your cluster, delete the service record: + + ```shell + kubectl delete --filename service.yaml + ``` diff --git a/community/samples/serving/machinelearning-python-bentoml/iris_classifier.py b/community/samples/serving/machinelearning-python-bentoml/iris_classifier.py new file mode 100644 index 0000000000..c94c386f84 --- /dev/null +++ b/community/samples/serving/machinelearning-python-bentoml/iris_classifier.py @@ -0,0 +1,11 @@ +from bentoml import env, artifacts, api, BentoService +from bentoml.handlers import DataframeHandler +from bentoml.artifact import SklearnModelArtifact + +@env(auto_pip_dependencies=True) +@artifacts([SklearnModelArtifact('model')]) +class IrisClassifier(BentoService): + + @api(DataframeHandler) + def predict(self, df): + return self.artifacts.model.predict(df) diff --git a/community/samples/serving/machinelearning-python-bentoml/main.py b/community/samples/serving/machinelearning-python-bentoml/main.py new file mode 100644 index 0000000000..b5bb8c0c72 --- /dev/null +++ b/community/samples/serving/machinelearning-python-bentoml/main.py @@ -0,0 +1,22 @@ +from sklearn import svm +from sklearn import datasets + +from iris_classifier import IrisClassifier + +if __name__ == "__main__": + # Load training data + iris = datasets.load_iris() + X, y = iris.data, iris.target + + # Model Training + clf = svm.SVC(gamma='scale') + clf.fit(X, y) + + # Create a iris classifier service instance + iris_classifier_service = IrisClassifier() + + # Pack the newly trained model artifact + iris_classifier_service.pack('model', clf) + + # Save the prediction service to disk for model serving + saved_path = iris_classifier_service.save() diff --git a/community/samples/serving/machinelearning-python-bentoml/service.yaml b/community/samples/serving/machinelearning-python-bentoml/service.yaml new file mode 100644 index 0000000000..732a9756f4 --- /dev/null +++ b/community/samples/serving/machinelearning-python-bentoml/service.yaml @@ -0,0 +1,24 @@ +apiVersion: serving.knative.dev/v1 +kind: Service +metadata: + name: iris-classifier + namespace: default +spec: + template: + spec: + containers: + - image: docker.io/{username}/iris-classifier + ports: + - containerPort: 5000 # Port to route to + livenessProbe: + httpGet: + path: /healthz + initialDelaySeconds: 3 
+ periodSeconds: 5 + readinessProbe: + httpGet: + path: /healthz + initialDelaySeconds: 3 + periodSeconds: 5 + failureThreshold: 3 + timeoutSeconds: 60 diff --git a/docs/eventing/samples/helloworld/helloworld-go/README.md b/docs/eventing/samples/helloworld/helloworld-go/README.md index 3da63d1fc6..203f997b45 100644 --- a/docs/eventing/samples/helloworld/helloworld-go/README.md +++ b/docs/eventing/samples/helloworld/helloworld-go/README.md @@ -1,7 +1,15 @@ -A simple web app written in Go that you can use to test knative eventing. It shows how to consume a [CloudEvent](https://cloudevents.io/) in Knative eventing, and optionally how to respond back with another CloudEvent in the http response, using the [Go SDK for CloudEvents](https://github.com/cloudevents/sdk-go) - -We will deploy the app as a [Kubernetes Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) along with a [Kubernetes Service](https://kubernetes.io/docs/concepts/services-networking/service/). -However, you can also deploy the app as a [Knative Serving Service](../../../../serving/README.md). +A simple web app written in Go that you can use to test knative eventing. It +shows how to consume a [CloudEvent](https://cloudevents.io/) in Knative +eventing, and optionally how to respond back with another CloudEvent in the http +response, using the +[Go SDK for CloudEvents](https://github.com/cloudevents/sdk-go) + +We will deploy the app as a +[Kubernetes Deployment](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/) +along with a +[Kubernetes Service](https://kubernetes.io/docs/concepts/services-networking/service/). +However, you can also deploy the app as a +[Knative Serving Service](../../../../serving/README.md). Follow the steps below to create the sample code and then deploy the app to your cluster. You can also download a working copy of the sample, by running the @@ -14,7 +22,9 @@ cd knative-docs/docs/eventing/samples/helloworld/helloworld-go ## Before you begin -- A Kubernetes cluster with [Knative Eventing](../../../getting-started.md#installing-knative-eventing) installed. +- A Kubernetes cluster with + [Knative Eventing](../../../getting-started.md#installing-knative-eventing) + installed. - [Docker](https://www.docker.com) installed and running on your local machine, and a Docker Hub account configured (we'll use it for a container registry). @@ -24,68 +34,51 @@ cd knative-docs/docs/eventing/samples/helloworld/helloworld-go code creates a basic web server which listens on port 8080: ```go - package main - import ( - "context" - "fmt" - "log" - "net/http" - "os" - - cloudevents "github.com/cloudevents/sdk-go" - "github.com/google/uuid" - ) + "context" + "log" - type eventData struct { - Message string `json:"message,omitempty,string"` - } - - func receive(ctx context.Context, event cloudevents.Event, response *cloudevents.EventResponse) error { - // Here is where your code to process the event will go. - // In this example we will log the event msg - log.Printf("Event Context: %+v\n", event.Context) - data := &HelloWorld{} - if err := event.DataAs(data); err != nil { - log.Printf("Error while extracting cloudevent Data: %s\n", err.Error()) - return err - } - log.Printf("Hello World Message %q", data.Msg) - - // Respond with another event (optional) - // This is optional and is intended to show how to respond back with another event after processing. 
- // The response will go back into the knative eventing system just like any other event - newEvent := cloudevents.NewEvent() - newEvent.SetID(uuid.New().String()) - newEvent.SetSource("knative/eventing/samples/hello-world") - newEvent.SetType("dev.knative.samples.hifromknative") - newEvent.SetData(HiFromKnative{Msg: "Hi from Knative!"}) - response.RespondWith(200, &newEvent) - - log.Printf("Responded with event %v", newEvent) - - return nil - } + cloudevents "github.com/cloudevents/sdk-go/v2" + "github.com/google/uuid" + ) - func handler(w http.ResponseWriter, r *http.Request) { - log.Print("Hello world received a request.") - target := os.Getenv("TARGET") - if target == "" { - target = "World" - } - fmt.Fprintf(w, "Hello %s!\n", target) + func receive(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { + // Here is where your code to process the event will go. + // In this example we will log the event msg + log.Printf("Event received. \n%s\n", event) + data := &HelloWorld{} + if err := event.DataAs(data); err != nil { + log.Printf("Error while extracting cloudevent Data: %s\n", err.Error()) + return nil, cloudevents.NewHTTPResult(400, "failed to convert data: %s", err) + } + log.Printf("Hello World Message from received event %q", data.Msg) + + // Respond with another event (optional) + // This is optional and is intended to show how to respond back with another event after processing. + // The response will go back into the knative eventing system just like any other event + newEvent := cloudevents.NewEvent() + newEvent.SetID(uuid.New().String()) + newEvent.SetSource("knative/eventing/samples/hello-world") + newEvent.SetType("dev.knative.samples.hifromknative") + if err := newEvent.SetData(cloudevents.ApplicationJSON, HiFromKnative{Msg: "Hi from helloworld-go app!"}); err != nil { + return nil, cloudevents.NewHTTPResult(500, "failed to set response data: %s", err) + } + log.Printf("Responding with event\n%s\n", newEvent) + return &newEvent, nil } func main() { - log.Print("Hello world sample started.") - c, err := cloudevents.NewDefaultClient() - if err != nil { - log.Fatalf("failed to create client, %v", err) - } - log.Fatal(c.StartReceiver(context.Background(), receive)) + log.Print("Hello world sample started.") + c, err := cloudevents.NewDefaultClient() + if err != nil { + log.Fatalf("failed to create client, %v", err) + } + log.Fatal(c.StartReceiver(context.Background(), receive)) } ``` -1. Create a new file named `eventschemas.go` and paste the following code. This defines the data schema of the CloudEvents. + +1. Create a new file named `eventschemas.go` and paste the following code. This + defines the data schema of the CloudEvents. ```go package main @@ -102,7 +95,7 @@ cd knative-docs/docs/eventing/samples/helloworld/helloworld-go Msg string `json:"msg,omitempty,string"` } ``` - + 1. In your project directory, create a file named `Dockerfile` and copy the code block below into it. For detailed instructions on dockerizing a Go app, see [Deploying Go servers with Docker](https://blog.golang.org/docker). @@ -134,9 +127,9 @@ cd knative-docs/docs/eventing/samples/helloworld/helloworld-go CMD ["/helloworld"] ``` -1. Create a new file, `sample-app.yaml` and copy the following service definition - into the file. Make sure to replace `{username}` with your Docker Hub - username. +1. Create a new file, `sample-app.yaml` and copy the following service + definition into the file. 
Make sure to replace `{username}` with your Docker + Hub username. ```yaml # Namespace for sample application with eventing enabled @@ -198,7 +191,7 @@ cd knative-docs/docs/eventing/samples/helloworld/helloworld-go apiVersion: v1 kind: Service name: helloworld-go - ``` + ``` ## Building and deploying the sample @@ -218,178 +211,208 @@ folder) you're ready to build and deploy the sample app. ``` 1. After the build has completed and the container is pushed to docker hub, you - can deploy the sample application into your cluster. Ensure that the container image value - in `sample-app.yaml` matches the container you built in the previous step. Apply - the configuration using `kubectl`: + can deploy the sample application into your cluster. Ensure that the + container image value in `sample-app.yaml` matches the container you built in + the previous step. Apply the configuration using `kubectl`: ```shell kubectl apply --filename sample-app.yaml ``` - 1. Above command created a namespace `knative-samples` and labelled it with `knative-eventing-injection=enabled`, to enable eventing in the namespace. Verify using the following command: - ```shell - kubectl get ns knative-samples --show-labels - ``` - 1. It deployed the helloworld-go app as a K8s Deployment and created a K8s service names helloworld-go. Verify using the following command. - ```shell - kubectl --namespace knative-samples get deployments helloworld-go - - kubectl --namespace knative-samples get svc helloworld-go - ``` - 1. It created a Knative Eventing Trigger to route certain events to the helloworld-go application. Make sure that Ready=true - ```shell - kubectl --namespace knative-samples get trigger helloworld-go - ``` -## Send and verify CloudEvents -Once you have deployed the application and verified that the namespace, sample application and trigger are ready, let's send a CloudEvent. -### Send CloudEvent to the Broker -We can send an http request directly to the [Broker](../../../broker-trigger.md) with correct CloudEvent headers set. + 1. Above command created a namespace `knative-samples` and labelled it with + `knative-eventing-injection=enabled`, to enable eventing in the namespace. + Verify using the following command: - 1. Deploy a curl pod and SSH into it ```shell - kubectl --namespace knative-samples run curl --image=radial/busyboxplus:curl -it + kubectl get ns knative-samples --show-labels ``` - 1. Run the following in the SSH terminal - ```shell - curl -v "default-broker.knative-samples.svc.cluster.local" \ - -X POST \ - -H "Ce-Id: 536808d3-88be-4077-9d7a-a3f162705f79" \ - -H "Ce-specversion: 0.3" \ - -H "Ce-Type: dev.knative.samples.helloworld" \ - -H "Ce-Source: dev.knative.samples/helloworldsource" \ - -H "Content-Type: application/json" \ - -d '{"msg":"Hello World from the curl pod."}' - - - exit - ``` -### Verify that event is received by helloworld-go app -Helloworld-go app logs the context and the msg of the above event, and replies back with another event. - 1. Display helloworld-go app logs + + 1. It deployed the helloworld-go app as a K8s Deployment and created a K8s + service names helloworld-go. Verify using the following command. + ```shell - kubectl --namespace knative-samples logs -l app=helloworld-go --tail=50 + kubectl --namespace knative-samples get deployments helloworld-go + + kubectl --namespace knative-samples get svc helloworld-go ``` - You should see something similar to: - ```shell - Event received. 
Context: Context Attributes, - specversion: 0.3 - type: dev.knative.samples.helloworld - source: dev.knative.samples/helloworldsource - id: 536808d3-88be-4077-9d7a-a3f162705f79 - time: 2019-10-04T22:35:26.05871736Z - datacontenttype: application/json - Extensions, - knativearrivaltime: 2019-10-04T22:35:26Z - knativehistory: default-kn2-trigger-kn-channel.knative-samples.svc.cluster.local - traceparent: 00-971d4644229653483d38c46e92a959c7-92c66312e4bb39be-00 - - Hello World Message "Hello World from the curl pod." - Responded with event Validation: valid - Context Attributes, - specversion: 0.2 - type: dev.knative.samples.hifromknative - source: knative/eventing/samples/hello-world - id: 37458d77-01f5-411e-a243-a459bbf79682 - Data, - {"msg":"Hi from Knative!"} + 1. It created a Knative Eventing Trigger to route certain events to the + helloworld-go application. Make sure that Ready=true + ```shell + kubectl --namespace knative-samples get trigger helloworld-go ``` - Play around with the CloudEvent attributes in the curl command and the trigger specification to understand how [Triggers work](../../../broker-trigger.md#trigger). + +## Send and verify CloudEvents + +Once you have deployed the application and verified that the namespace, sample +application and trigger are ready, let's send a CloudEvent. + +### Send CloudEvent to the Broker + +We can send an http request directly to the [Broker](../../../broker-trigger.md) +with correct CloudEvent headers set. + +1. Deploy a curl pod and SSH into it + ```shell + kubectl --namespace knative-samples run curl --image=radial/busyboxplus:curl -it + ``` +1. Run the following in the SSH terminal + + ```shell + curl -v "default-broker.knative-samples.svc.cluster.local" \ + -X POST \ + -H "Ce-Id: 536808d3-88be-4077-9d7a-a3f162705f79" \ + -H "Ce-Specversion: 1.0" \ + -H "Ce-Type: dev.knative.samples.helloworld" \ + -H "Ce-Source: dev.knative.samples/helloworldsource" \ + -H "Content-Type: application/json" \ + -d '{"msg":"Hello World from the curl pod."}' + + exit + ``` + +### Verify that event is received by helloworld-go app + +Helloworld-go app logs the context and the msg of the above event, and replies +back with another event. + +1. Display helloworld-go app logs + `shell kubectl --namespace knative-samples logs -l app=helloworld-go --tail=50` + You should see something similar to: + + ```shell + Event received. + Validation: valid + Context Attributes, + specversion: 1.0 + type: dev.knative.samples.helloworld + source: dev.knative.samples/helloworldsource + id: 536808d3-88be-4077-9d7a-a3f162705f79 + time: 2019-10-04T22:35:26.05871736Z + datacontenttype: application/json + Extensions, + knativearrivaltime: 2019-10-04T22:35:26Z + knativehistory: default-kn2-trigger-kn-channel.knative-samples.svc.cluster.local + traceparent: 00-971d4644229653483d38c46e92a959c7-92c66312e4bb39be-00 + Data, + {"msg":"Hello World from the curl pod."} + + Hello World Message "Hello World from the curl pod." + Responded with event + Validation: valid + Context Attributes, + specversion: 1.0 + type: dev.knative.samples.hifromknative + source: knative/eventing/samples/hello-world + id: 37458d77-01f5-411e-a243-a459bbf79682 + datacontenttype: application/json + Data, + {"msg":"Hi from Knative!"} + + ``` + + Play around with the CloudEvent attributes in the curl command and the + trigger specification to understand how + [Triggers work](../../../broker-trigger.md#trigger). 
## Verify reply from helloworld-go app -`helloworld-go` app replies back with an event of `type= dev.knative.samples.hifromknative`, and `source=knative/eventing/samples/hello-world`. This event enters the eventing mesh via the Broker and can be delivered to other services using a Trigger - 1. Deploy a pod that receives any CloudEvent and logs the event to its output. - ```shell - kubectl --namespace knative-samples apply --filename - << END - # event-display app deploment - apiVersion: apps/v1 - kind: Deployment - metadata: - name: event-display - namespace: knative-samples - spec: - replicas: 1 - selector: - matchLabels: &labels - app: event-display - template: - metadata: - labels: *labels - spec: - containers: - - name: helloworld-go - image: gcr.io/knative-releases/github.com/knative/eventing-sources/cmd/event_display - --- - # Service that exposes event-display app. - # This will be the subscriber for the Trigger - kind: Service - apiVersion: v1 - metadata: - name: event-display - namespace: knative-samples - spec: - selector: - app: event-display - ports: - - protocol: TCP - port: 80 - targetPort: 8080 - END - ``` - - 1. Create a trigger to deliver the event to the above service - ```shell - kubectl --namespace knative-samples apply --filename - << END - apiVersion: eventing.knative.dev/v1alpha1 - kind: Trigger - metadata: - name: event-display - namespace: knative-samples - spec: - broker: default - filter: - attributes: - type: dev.knative.samples.hifromknative - source: knative/eventing/samples/hello-world - subscriber: - ref: - apiVersion: v1 - kind: Service - name: event-display - END - ``` +`helloworld-go` app replies back with an event of +`type= dev.knative.samples.hifromknative`, and +`source=knative/eventing/samples/hello-world`. This event enters the eventing +mesh via the Broker and can be delivered to other services using a Trigger - 1. [Send a CloudEvent to the Broker](###Send-CloudEvent-to-the-Broker) +1. Deploy a pod that receives any CloudEvent and logs the event to its output. - 1. Check the logs of event-display service - ```shell - kubectl --namespace knative-samples logs -l app=event-display --tail=50 - ``` - You should see something similar to: - ```shell - cloudevents.Event - Validation: valid - Context Attributes, - specversion: 0.3 - type: dev.knative.samples.hifromknative - source: knative/eventing/samples/hello-world - id: 8a7384b9-8bbe-4634-bf0f-ead07e450b2a - time: 2019-10-04T22:53:39.844943931Z - datacontenttype: application/json - Extensions, - knativearrivaltime: 2019-10-04T22:53:39Z - knativehistory: default-kn2-ingress-kn-channel.knative-samples.svc.cluster.local - traceparent: 00-4b01db030b9ea04bb150b77c8fa86509-2740816590a7604f-00 - Data, - { - "msg": "Hi from helloworld-go app!" - } - ``` + ```shell + kubectl --namespace knative-samples apply --filename - << END + # event-display app deploment + apiVersion: apps/v1 + kind: Deployment + metadata: + name: event-display + namespace: knative-samples + spec: + replicas: 1 + selector: + matchLabels: &labels + app: event-display + template: + metadata: + labels: *labels + spec: + containers: + - name: helloworld-go + image: gcr.io/knative-releases/github.com/knative/eventing-sources/cmd/event_display + --- + # Service that exposes event-display app. 
+ # This will be the subscriber for the Trigger + kind: Service + apiVersion: v1 + metadata: + name: event-display + namespace: knative-samples + spec: + selector: + app: event-display + ports: + - protocol: TCP + port: 80 + targetPort: 8080 + END + ``` + +1. Create a trigger to deliver the event to the above service - **Note: You could use the above approach to test your applications too.** - + ```shell + kubectl --namespace knative-samples apply --filename - << END + apiVersion: eventing.knative.dev/v1alpha1 + kind: Trigger + metadata: + name: event-display + namespace: knative-samples + spec: + broker: default + filter: + attributes: + type: dev.knative.samples.hifromknative + source: knative/eventing/samples/hello-world + subscriber: + ref: + apiVersion: v1 + kind: Service + name: event-display + END + ``` + +1. [Send a CloudEvent to the Broker](###Send-CloudEvent-to-the-Broker) + +1. Check the logs of event-display service + ```shell + kubectl --namespace knative-samples logs -l app=event-display --tail=50 + ``` + You should see something similar to: + ```shell + cloudevents.Event + Validation: valid + Context Attributes, + specversion: 0.3 + type: dev.knative.samples.hifromknative + source: knative/eventing/samples/hello-world + id: 8a7384b9-8bbe-4634-bf0f-ead07e450b2a + time: 2019-10-04T22:53:39.844943931Z + datacontenttype: application/json + Extensions, + knativearrivaltime: 2019-10-04T22:53:39Z + knativehistory: default-kn2-ingress-kn-channel.knative-samples.svc.cluster.local + traceparent: 00-4b01db030b9ea04bb150b77c8fa86509-2740816590a7604f-00 + Data, + { + "msg": "Hi from helloworld-go app!" + } + ``` +**Note: You could use the above approach to test your applications too.** ## Removing the sample app deployment diff --git a/docs/eventing/samples/helloworld/helloworld-go/helloworld.go b/docs/eventing/samples/helloworld/helloworld-go/helloworld.go index 6cc47e8b70..36169acabf 100644 --- a/docs/eventing/samples/helloworld/helloworld-go/helloworld.go +++ b/docs/eventing/samples/helloworld/helloworld-go/helloworld.go @@ -2,27 +2,20 @@ package main import ( "context" - "fmt" "log" - "net/http" - "os" - cloudevents "github.com/cloudevents/sdk-go" + cloudevents "github.com/cloudevents/sdk-go/v2" "github.com/google/uuid" ) -type eventData struct { - Message string `json:"message,omitempty,string"` -} - -func receive(ctx context.Context, event cloudevents.Event, response *cloudevents.EventResponse) error { +func receive(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { // Here is where your code to process the event will go. // In this example we will log the event msg - log.Printf("Event received. Context: %v\n", event.Context) + log.Printf("Event received. 
\n%s\n", event) data := &HelloWorld{} if err := event.DataAs(data); err != nil { log.Printf("Error while extracting cloudevent Data: %s\n", err.Error()) - return err + return nil, cloudevents.NewHTTPResult(400, "failed to convert data: %s", err) } log.Printf("Hello World Message from received event %q", data.Msg) @@ -33,21 +26,11 @@ func receive(ctx context.Context, event cloudevents.Event, response *cloudevents newEvent.SetID(uuid.New().String()) newEvent.SetSource("knative/eventing/samples/hello-world") newEvent.SetType("dev.knative.samples.hifromknative") - newEvent.SetData(HiFromKnative{Msg: "Hi from helloworld-go app!"}) - response.RespondWith(200, &newEvent) - - log.Printf("Responded with event %v", newEvent) - - return nil -} - -func handler(w http.ResponseWriter, r *http.Request) { - log.Print("Hello world received a request.") - target := os.Getenv("TARGET") - if target == "" { - target = "World" + if err := newEvent.SetData(cloudevents.ApplicationJSON, HiFromKnative{Msg: "Hi from helloworld-go app!"}); err != nil { + return nil, cloudevents.NewHTTPResult(500, "failed to set response data: %s", err) } - fmt.Fprintf(w, "Hello %s!\n", target) + log.Printf("Responding with event\n%s\n", newEvent) + return &newEvent, nil } func main() { diff --git a/docs/eventing/samples/helloworld/helloworld-go/sample-app.yaml b/docs/eventing/samples/helloworld/helloworld-go/sample-app.yaml index b093868d7a..51828f7b73 100644 --- a/docs/eventing/samples/helloworld/helloworld-go/sample-app.yaml +++ b/docs/eventing/samples/helloworld/helloworld-go/sample-app.yaml @@ -6,7 +6,7 @@ metadata: labels: knative-eventing-injection: enabled --- -# Helloworld-go app deploment +# Helloworld-go app deployment apiVersion: apps/v1 kind: Deployment metadata: @@ -41,7 +41,7 @@ spec: targetPort: 8080 --- # Knative Eventing Trigger to trigger the helloworld-go service -apiVersion: eventing.knative.dev/v1alpha1 +apiVersion: eventing.knative.dev/v1beta1 kind: Trigger metadata: name: helloworld-go diff --git a/docs/eventing/samples/kafka/source/README.md b/docs/eventing/samples/kafka/source/README.md index ad81929a44..07666f523c 100644 --- a/docs/eventing/samples/kafka/source/README.md +++ b/docs/eventing/samples/kafka/source/README.md @@ -56,7 +56,7 @@ You must ensure that you meet the [prerequisites listed in the Apache Kafka over 2. Deploy the `KafkaTopic` ```shell - $ kubectl apply -f kafka/source/samples/strimzi-topic.yaml + $ kubectl apply -f strimzi-topic.yaml kafkatopic.kafka.strimzi.io/knative-demo-topic created ``` @@ -70,7 +70,14 @@ You must ensure that you meet the [prerequisites listed in the Apache Kafka over ### Create the Event Display service -1. Build the Event Display Service (`even-display.yaml`) +1. Download a copy of the code: + + ```shell + git clone -b "{{< branch >}}" https://github.com/knative/docs knative-docs + cd knative-docs/docs/eventing/samples/kafka/source + ``` + +2. Build the Event Display Service (`event-display.yaml`) ```yaml apiVersion: serving.knative.dev/v1 @@ -90,7 +97,7 @@ You must ensure that you meet the [prerequisites listed in the Apache Kafka over 1. Deploy the Event Display Service ``` - $ kubectl apply --filename source/samples/event-display.yaml + $ kubectl apply --filename event-display.yaml ... service.serving.knative.dev/event-display created ``` @@ -126,7 +133,7 @@ You must ensure that you meet the [prerequisites listed in the Apache Kafka over 1. Deploy the event source. 
``` - $ kubectl apply -f kafka/source/samples/event-source.yaml + $ kubectl apply -f event-source.yaml ... kafkasource.sources.eventing.knative.dev/kafka-source created ``` @@ -211,7 +218,7 @@ You must ensure that you meet the [prerequisites listed in the Apache Kafka over 4. (Optional) Remove the Apache Kafka Topic ```shell - $ kubectl delete -f kafka/source/samples/kafka-topic.yaml + $ kubectl delete -f kafka-topic.yaml kafkatopic.kafka.strimzi.io "knative-demo-topic" deleted ``` diff --git a/docs/eventing/samples/sinkbinding/heartbeats-source.yaml b/docs/eventing/samples/sinkbinding/heartbeats-source.yaml index d7d2504832..5d293c885c 100644 --- a/docs/eventing/samples/sinkbinding/heartbeats-source.yaml +++ b/docs/eventing/samples/sinkbinding/heartbeats-source.yaml @@ -16,7 +16,6 @@ apiVersion: batch/v1beta1 kind: CronJob metadata: name: heartbeat-cron -spec: spec: # Run every minute schedule: "* * * * *" diff --git a/docs/eventing/samples/sinkbinding/sinkbinding.yaml b/docs/eventing/samples/sinkbinding/sinkbinding.yaml index 10f49956af..0bbe3b5dc9 100644 --- a/docs/eventing/samples/sinkbinding/sinkbinding.yaml +++ b/docs/eventing/samples/sinkbinding/sinkbinding.yaml @@ -29,3 +29,7 @@ spec: apiVersion: serving.knative.dev/v1 kind: Service name: event-display + ceOverrides: + extensions: + sink: bound + diff --git a/docs/eventing/sources/README.md b/docs/eventing/sources/README.md index d444c6a14a..448d69befe 100644 --- a/docs/eventing/sources/README.md +++ b/docs/eventing/sources/README.md @@ -41,6 +41,7 @@ Name | Status | Support | Description [GitLab](https://gitlab.com/triggermesh/gitlabsource) | Proof of Concept | None | Registers for events of the specified types on the specified GitLab repository. Brings those events into Knative. [Kubernetes](https://github.com/knative/eventing/blob/master/pkg/apis/sources/v1alpha1/apiserver_types.go) | Active Development | Knative | Brings Kubernetes API server events into Knative. [Ping](https://github.com/knative/eventing/blob/master/pkg/apis/sources/v1alpha2/ping_types.go) | In development | None | Uses an in-memory timer to produce events with a fixed payload on a specified cron schedule. +[VMware](https://github.com/vmware-tanzu/sources-for-knative/tree/{{< branch >}}/pkg/apis/source/v1alpha1/vspheresource_types.go) | Active Development | None | Brings [vSphere](https://www.vmware.com/products/vsphere.html) events into Knative. @@ -64,12 +65,12 @@ These are containers intended to be used with `ContainerSource`. Name | Status | Support | Description --- | --- | --- | --- -[AWS CodeCommit](https://github.com/triggermesh/knative-lambda-sources/tree/master/awscodecommit) | Active Development | TriggerMesh | Registers for events of the specified types on the specified AWS CodeCommit repository. Brings those events into Knative. -[AWS Cognito](https://github.com/triggermesh/knative-lambda-sources/tree/master/awscognito) | Active Development | TriggerMesh | Registers for AWS Cognito events. Brings those events into Knative. -[AWS DynamoDB](https://github.com/triggermesh/knative-lambda-sources/tree/master/awsdynamodb) | Active Development | TriggerMesh | Registers for events of on the specified AWS DynamoDB table. Brings those events into Knative. -[AWS Kinesis](https://github.com/triggermesh/knative-lambda-sources/tree/master/awskinesis) | Active Development | TriggerMesh | Registers for events on the specified AWS Kinesis stream. Brings those events into Knative. 
-[AWS SNS](https://github.com/triggermesh/knative-lambda-sources/tree/master/awssns) | Active Development | TriggerMesh | Registers for events of the specified AWS SNS endpoint. Brings those events into Knative. -[AWS SQS](https://github.com/triggermesh/knative-lambda-sources/tree/master/awssqs) | Active Development | TriggerMesh | Registers for events of the specified AWS SQS queue. Brings those events into Knative. +[AWS CodeCommit](https://github.com/triggermesh/knative-lambda-sources/tree/master/awscodecommit) | Supported | TriggerMesh | Registers for events of the specified types on the specified AWS CodeCommit repository. Brings those events into Knative. +[AWS Cognito](https://github.com/triggermesh/knative-lambda-sources/tree/master/awscognito) | Supported | TriggerMesh | Registers for AWS Cognito events. Brings those events into Knative. +[AWS DynamoDB](https://github.com/triggermesh/knative-lambda-sources/tree/master/awsdynamodb) | Supported | TriggerMesh | Registers for events of on the specified AWS DynamoDB table. Brings those events into Knative. +[AWS Kinesis](https://github.com/triggermesh/knative-lambda-sources/tree/master/awskinesis) | Supported | TriggerMesh | Registers for events on the specified AWS Kinesis stream. Brings those events into Knative. +[AWS SNS](https://github.com/triggermesh/knative-lambda-sources/tree/master/awssns) | Supported | TriggerMesh | Registers for events of the specified AWS SNS endpoint. Brings those events into Knative. +[AWS SQS](https://github.com/triggermesh/knative-lambda-sources/tree/master/awssqs) | Supported | TriggerMesh | Registers for events of the specified AWS SQS queue. Brings those events into Knative. [FTP / SFTP](https://github.com/vaikas-google/ftp) | Proof of concept | None | Watches for files being uploaded into a FTP/SFTP and generates events for those. [Heartbeat](https://github.com/Harwayne/auto-container-source/tree/master/heartbeat-source) | Proof of Concept | None | Uses an in-memory timer to produce events as the specified interval. Uses AutoContainerSource for underlying infrastructure. [Heartbeats](https://github.com/knative/eventing-contrib/tree/{{< branch >}}/cmd/heartbeats) | Proof of Concept | None | Uses an in-memory timer to produce events at the specified interval. diff --git a/docs/eventing/sources/sources.yaml b/docs/eventing/sources/sources.yaml index 922b359328..9475666932 100644 --- a/docs/eventing/sources/sources.yaml +++ b/docs/eventing/sources/sources.yaml @@ -131,6 +131,12 @@ sources: support: None description: > Brings [Apache CouchDB](https://couchdb.apache.org/) messages into Knative. + - name: VMware + url: https://github.com/vmware-tanzu/sources-for-knative/tree/{{< branch >}}/pkg/apis/source/v1alpha1/vspheresource_types.go + status: Active Development + support: None + description: > + Brings [vSphere](https://www.vmware.com/products/vsphere.html) events into Knative. # These are containers intended to be used with ContainerSource, but are not CRDs. containers: @@ -163,37 +169,37 @@ containers: for underlying infrastructure. - name: AWS CodeCommit url: https://github.com/triggermesh/knative-lambda-sources/tree/master/awscodecommit - status: Active Development + status: Supported support: TriggerMesh description: > Registers for events of the specified types on the specified AWS CodeCommit repository. Brings those events into Knative. 
- name: AWS Cognito url: https://github.com/triggermesh/knative-lambda-sources/tree/master/awscognito - status: Active Development + status: Supported support: TriggerMesh description: > Registers for AWS Cognito events. Brings those events into Knative. - name: AWS DynamoDB url: https://github.com/triggermesh/knative-lambda-sources/tree/master/awsdynamodb - status: Active Development + status: Supported support: TriggerMesh description: > Registers for events of on the specified AWS DynamoDB table. Brings those events into Knative. - name: AWS Kinesis url: https://github.com/triggermesh/knative-lambda-sources/tree/master/awskinesis - status: Active Development + status: Supported support: TriggerMesh description: > Registers for events on the specified AWS Kinesis stream. Brings those events into Knative. - name: AWS SQS url: https://github.com/triggermesh/knative-lambda-sources/tree/master/awssqs - status: Active Development + status: Supported support: TriggerMesh description: > Registers for events of the specified AWS SQS queue. Brings those events into Knative. - name: AWS SNS url: https://github.com/triggermesh/knative-lambda-sources/tree/master/awssns - status: Active Development + status: Supported support: TriggerMesh description: > Registers for events of the specified AWS SNS endpoint. Brings those events into Knative. diff --git a/docs/install/any-kubernetes-cluster.md b/docs/install/any-kubernetes-cluster.md index ee1c87077f..3468496b60 100644 --- a/docs/install/any-kubernetes-cluster.md +++ b/docs/install/any-kubernetes-cluster.md @@ -228,7 +228,7 @@ The following commands install Kourier and enable its Knative integration. 1. Install the Knative Kourier controller: ```bash - kubectl apply --filename https://raw.githubusercontent.com/knative/serving/{{< version >}}/third_party/kourier-latest/kourier.yaml + kubectl apply --filename {{< artifact repo="net-kourier" file="kourier.yaml" >}} ``` 1. To configure Knative Serving to use Kourier by default: @@ -704,6 +704,20 @@ To learn more about the Apache CouchDB source, read [our documentation]((https:/ {{< /tab >}} +{{% tab name="VMware Sources and Bindings" %}} + +{{< feature-state version="v0.14" state="alpha" >}} + +The following command installs the VMware Sources and Bindings: + + ```bash + kubectl apply --filename {{< artifact org="vmware-tanzu" repo="sources-for-knative" file="release.yaml" >}} + ``` + +To learn more about the VMware sources and bindings, try [our samples](https://github.com/vmware-tanzu/sources-for-knative/tree/master/samples/README.md). + +{{< /tab >}} + diff --git a/docs/install/installing-istio.md b/docs/install/installing-istio.md index 07c5cfdfca..5ad0a43432 100644 --- a/docs/install/installing-istio.md +++ b/docs/install/installing-istio.md @@ -13,9 +13,6 @@ installation. If your cloud platform offers a managed Istio installation, the [install guide](./README.md) for your specific platform will have those instructions. -For example, the [GKE Install Guide](./Knative-with-GKE.md) includes the -instructions for installing Istio on your cluster using `gcloud`. - ## Before you begin You need: @@ -57,7 +54,7 @@ without automatic sidecar injection. 
```shell # Download and unpack Istio - export ISTIO_VERSION=1.3.6 + export ISTIO_VERSION=1.4.6 curl -L https://git.io/getLatestIstio | sh - cd istio-${ISTIO_VERSION} ``` @@ -342,5 +339,5 @@ rm -rf istio-${ISTIO_VERSION} [2]: https://istio.io/docs/setup/kubernetes/additional-setup/sidecar-injection/#manual-sidecar-injection [3]: https://istio.io/docs/tasks/traffic-management/ingress/secure-ingress-sds/ -[4]: https://istio.io/docs/tasks/security/mutual-tls/ +[4]: https://istio.io/docs/concepts/security/#mutual-tls-authentication [5]: https://istio.io/docs/tasks/security/authz-http/ diff --git a/docs/serving/installing-cert-manager.md b/docs/serving/installing-cert-manager.md index 88d53d2cd7..2f850d8ad2 100644 --- a/docs/serving/installing-cert-manager.md +++ b/docs/serving/installing-cert-manager.md @@ -27,49 +27,16 @@ You must meet the following requirements to install cert-manager for Knative: component, see the [Knative installation guides](../install/). - You must configure your Knative cluster to use a [custom domain](./using-a-custom-domain.md). -- Knative currently supports cert-manager version `0.6.1` or higher. +- Knative currently supports cert-manager version `0.12.0` or higher. ## Downloading and installing cert-manager Use the following steps to download, install, and configure cert-manager for your Knative cluster environment: -1. Run the following commands to download and extract the `cert-manager` - installation package: +1. Follow the steps in the official `cert-manager` website to download and install cert-manager - ```shell - CERT_MANAGER_VERSION=0.6.1 - DOWNLOAD_URL=https://github.com/jetstack/cert-manager/archive/v${CERT_MANAGER_VERSION}.tar.gz - - wget $DOWNLOAD_URL - tar xzf v${CERT_MANAGER_VERSION}.tar.gz - - cd cert-manager-${CERT_MANAGER_VERSION} - ``` - -1. Run the following commands to install cert-manager: - - 1. Install the cert-manager CRDs: - - ```shell - kubectl apply -f deploy/manifests/00-crds.yaml - ``` - - 1. Run one of the following commands to install the cert-manager plugin: - - - For Knative clusters running Kubernetes version 1.13 or above: - - ```shell - # If you are running cluster version 1.13 or above - kubectl apply -f deploy/manifests/cert-manager.yaml - ``` - - - For Knative clusters running Kubernetes version 1.12 or below: - - ```shell - # If you are running cluster version 1.12 or below, you must append the --validate=false flag - kubectl apply -f deploy/manifests/cert-manager.yaml --validate=false - ``` + [Installation steps](https://cert-manager.io/docs/installation/kubernetes/) 1. Configure which DNS provider is used to validate the DNS-01 challenge requests. diff --git a/docs/serving/samples/autoscale-go/README.md b/docs/serving/samples/autoscale-go/README.md index 061db193d9..1916b14426 100644 --- a/docs/serving/samples/autoscale-go/README.md +++ b/docs/serving/samples/autoscale-go/README.md @@ -111,7 +111,7 @@ A demonstration of the autoscaling capabilities of a Knative Serving Revision. Knative Serving autoscaling is based on the average number of in-flight requests per pod (concurrency). The system has a default -[target concurrency of 100](https://github.com/knative/serving/blob/3f00c39e289ed4bfb84019131651c2e4ea660ab5/config/config-autoscaler.yaml#L35-L41) +[target concurrency of 100](https://github.com/knative/serving/blob/master/config/core/configmaps/autoscaler.yaml#L54-L60) but [we used 10](service.yaml#L25-L26) for our service. 
We loaded the service with 50 concurrent requests so the autoscaler created 5 pods (`50 concurrent requests / target of 10 = 5 pods`) diff --git a/docs/serving/samples/cloudevents/cloudevents-go/README.md b/docs/serving/samples/cloudevents/cloudevents-go/README.md index 3142bd5016..40976996f2 100644 --- a/docs/serving/samples/cloudevents/cloudevents-go/README.md +++ b/docs/serving/samples/cloudevents/cloudevents-go/README.md @@ -1,15 +1,17 @@ A simple web app written in Go that can receive and send Cloud Events that you -can use for testing. It supports running in two modes: +can use for testing. It supports running in two modes: + 1. The default mode has the app reply to your input events with the output - event, which is simplest for demonstrating things working in isolation, - but is also the model for working for the Knative Eventing `Broker` concept. + event, which is simplest for demonstrating things working in isolation, but + is also the model for working for the Knative Eventing `Broker` concept. -2. `K_SINK` mode has the app send events to the destination encoded in `$K_SINK`, - which is useful to demonstrate how folks can synthesize events to send to - a Service or Broker when not initiated by a Broker invocation (e.g. - implementing an event source) +2. `K_SINK` mode has the app send events to the destination encoded in + `$K_SINK`, which is useful to demonstrate how folks can synthesize events to + send to a Service or Broker when not initiated by a Broker invocation (e.g. + implementing an event source) -The application will use `$K_SINK`-mode whenever the environment variable is specified. +The application will use `$K_SINK`-mode whenever the environment variable is +specified. Follow the steps below to create the sample code and then deploy the app to your cluster. You can also download a working copy of the sample, by running the @@ -34,20 +36,20 @@ cd knative-docs/docs/serving/samples/cloudevents/cloudevents-go different modes of operation: ```go - func (recv *Receiver) ReceiveAndSend(ctx context.Context, event cloudevents.Event) error { + func (recv *Receiver) ReceiveAndSend(ctx context.Context, event cloudevents.Event) cloudevents.Result { // This is called whenever an event is received if $K_SINK is set, and sends a new event // to the url in $K_SINK. } - func (recv *Receiver) ReceiveAndReply(ctx context.Context, event cloudevents.Event, eventResp *cloudevents.EventResponse) error { + func (recv *Receiver) ReceiveAndReply(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { // This is called whenever an event is received if $K_SINK is NOT set, and it replies with // the new event instead. } ``` -1. If you look in `Dockerfile`, you will see a method for pulling in the dependencies and - building a small Go container based on Alpine. You can build and push this to your - registry of choice via: +1. If you look in `Dockerfile`, you will see a method for pulling in the + dependencies and building a small Go container based on Alpine. You can build + and push this to your registry of choice via: ```shell docker build -t . 
@@ -103,7 +105,6 @@ You will get back: {"message":"Hello, Dave"} ``` - ## Removing the sample app deployment To remove the sample app from your cluster, delete the service record: diff --git a/docs/serving/samples/cloudevents/cloudevents-go/cloudevents.go b/docs/serving/samples/cloudevents/cloudevents-go/cloudevents.go index be99ce6f31..90024f0ef8 100644 --- a/docs/serving/samples/cloudevents/cloudevents-go/cloudevents.go +++ b/docs/serving/samples/cloudevents/cloudevents-go/cloudevents.go @@ -4,9 +4,8 @@ import ( "context" "fmt" "log" - "net/http" - cloudevents "github.com/cloudevents/sdk-go" + cloudevents "github.com/cloudevents/sdk-go/v2" "github.com/kelseyhightower/envconfig" ) @@ -55,16 +54,15 @@ type Response struct { } // handle shared the logic for producing the Response event from the Request. -func handle(req Request) (resp Response) { - resp.Message = fmt.Sprintf("Hello, %s", req.Name) - return +func handle(req Request) Response { + return Response{Message: fmt.Sprintf("Hello, %s", req.Name)} } // ReceiveAndSend is invoked whenever we receive an event. -func (recv *Receiver) ReceiveAndSend(ctx context.Context, event cloudevents.Event) error { +func (recv *Receiver) ReceiveAndSend(ctx context.Context, event cloudevents.Event) cloudevents.Result { req := Request{} if err := event.DataAs(&req); err != nil { - return err + return cloudevents.NewHTTPResult(400, "failed to convert data: %s", err) } log.Printf("Got an event from: %q", req.Name) @@ -74,19 +72,19 @@ func (recv *Receiver) ReceiveAndSend(ctx context.Context, event cloudevents.Even r := cloudevents.NewEvent(cloudevents.VersionV1) r.SetType("dev.knative.docs.sample") r.SetSource("https://github.com/knative/docs/docs/serving/samples/cloudevents/cloudevents-go") - r.SetDataContentType("application/json") - r.SetData(resp) + if err := r.SetData("application/json", resp); err != nil { + return cloudevents.NewHTTPResult(500, "failed to set response data: %s", err) + } ctx = cloudevents.ContextWithTarget(ctx, recv.Target) - _, _, err := recv.client.Send(ctx, r) - return err + return recv.client.Send(ctx, r) } // ReceiveAndReply is invoked whenever we receive an event. 
-func (recv *Receiver) ReceiveAndReply(ctx context.Context, event cloudevents.Event, eventResp *cloudevents.EventResponse) error { +func (recv *Receiver) ReceiveAndReply(ctx context.Context, event cloudevents.Event) (*cloudevents.Event, cloudevents.Result) { req := Request{} if err := event.DataAs(&req); err != nil { - return err + return nil, cloudevents.NewHTTPResult(400, "failed to convert data: %s", err) } log.Printf("Got an event from: %q", req.Name) @@ -96,10 +94,9 @@ func (recv *Receiver) ReceiveAndReply(ctx context.Context, event cloudevents.Eve r := cloudevents.NewEvent(cloudevents.VersionV1) r.SetType("dev.knative.docs.sample") r.SetSource("https://github.com/knative/docs/docs/serving/samples/cloudevents/cloudevents-go") - r.SetDataContentType("application/json") - r.SetData(resp) - - eventResp.RespondWith(http.StatusOK, &r) + if err := r.SetData("application/json", resp); err != nil { + return nil, cloudevents.NewHTTPResult(500, "failed to set response data: %s", err) + } - return nil + return &r, nil } diff --git a/docs/serving/using-cert-manager-on-gcp.md b/docs/serving/using-cert-manager-on-gcp.md index d7f8d0adc7..56b3a62720 100644 --- a/docs/serving/using-cert-manager-on-gcp.md +++ b/docs/serving/using-cert-manager-on-gcp.md @@ -105,33 +105,31 @@ TLS certificates and how the requests are validated with Cloud DNS. provider info, including your `cert-manager-cloud-dns-admin` service account. ```shell - kubectl apply --filename - <` is your domain: ```shell - # Change this value to the domain you want to use. - export DOMAIN= - - kubectl apply --filename - < + + kubectl apply --filename - < 0 { - for _, fn := range c.eventDefaulterFns { - event = fn(ctx, event) - } - } - - // Validate the event conforms to the CloudEvents Spec. - if err := event.Validate(); err != nil { - return ctx, nil, err - } - // Send the event over the transport. - return c.transport.Send(ctx, event) -} - -// Receive is called from from the transport on event delivery. -func (c *ceClient) Receive(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error { - ctx, r := observability.NewReporter(ctx, reportReceive) - err := c.obsReceive(ctx, event, resp) - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} - -func (c *ceClient) obsReceive(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error { - if c.fn != nil { - ctx, rFn := observability.NewReporter(ctx, reportReceiveFn) - err := c.fn.invoke(ctx, event, resp) - if err != nil { - rFn.Error() - } else { - rFn.OK() - } - - // Apply the defaulter chain to the outgoing event. - if err == nil && resp != nil && resp.Event != nil && len(c.eventDefaulterFns) > 0 { - for _, fn := range c.eventDefaulterFns { - *resp.Event = fn(ctx, *resp.Event) - } - // Validate the event conforms to the CloudEvents Spec. - if err := resp.Event.Validate(); err != nil { - return fmt.Errorf("cloudevent validation failed on response event: %v", err) - } - } - return err - } - return nil -} - -// StartReceiver sets up the given fn to handle Receive. -// See Client.StartReceiver for details. This is a blocking call. 
-func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { - c.receiverMu.Lock() - defer c.receiverMu.Unlock() - - if c.transport == nil { - return fmt.Errorf("client not ready, transport not initialized") - } - if c.fn != nil { - return fmt.Errorf("client already has a receiver") - } - - if fn, err := receiver(fn); err != nil { - return err - } else { - c.fn = fn - } - - defer func() { - c.fn = nil - }() - - return c.transport.StartReceiver(ctx) -} - -func (c *ceClient) applyOptions(opts ...Option) error { - for _, fn := range opts { - if err := fn(c); err != nil { - return err - } - } - return nil -} - -// Convert implements transport Converter.Convert. -func (c *ceClient) Convert(ctx context.Context, m transport.Message, err error) (*cloudevents.Event, error) { - if c.convertFn != nil { - return c.convertFn(ctx, m, err) - } - return nil, err -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/observability.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/observability.go deleted file mode 100644 index b844c19a86..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/observability.go +++ /dev/null @@ -1,68 +0,0 @@ -package client - -import ( - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -var ( - // LatencyMs measures the latency in milliseconds for the CloudEvents - // client methods. - LatencyMs = stats.Float64("cloudevents.io/sdk-go/client/latency", "The latency in milliseconds for the CloudEvents client methods.", "ms") -) - -var ( - // LatencyView is an OpenCensus view that shows client method latency. - LatencyView = &view.View{ - Name: "client/latency", - Measure: LatencyMs, - Description: "The distribution of latency inside of client for CloudEvents.", - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: observability.LatencyTags(), - } -) - -type observed int32 - -// Adheres to Observable -var _ observability.Observable = observed(0) - -const ( - reportSend observed = iota - reportReceive - reportReceiveFn -) - -// TraceName implements Observable.TraceName -func (o observed) TraceName() string { - switch o { - case reportSend: - return "client/send" - case reportReceive: - return "client/receive" - case reportReceiveFn: - return "client/receive/fn" - default: - return "client/unknown" - } -} - -// MethodName implements Observable.MethodName -func (o observed) MethodName() string { - switch o { - case reportSend: - return "send" - case reportReceive: - return "receive" - case reportReceiveFn: - return "receive/fn" - default: - return "unknown" - } -} - -// LatencyMs implements Observable.LatencyMs -func (o observed) LatencyMs() *stats.Float64Measure { - return LatencyMs -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/options.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/options.go deleted file mode 100644 index 6e5051c3ea..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/options.go +++ /dev/null @@ -1,53 +0,0 @@ -package client - -import ( - "fmt" -) - -// Option is the function signature required to be considered an client.Option. -type Option func(*ceClient) error - -// WithEventDefaulter adds an event defaulter to the end of the defaulter chain. 
-func WithEventDefaulter(fn EventDefaulter) Option { - return func(c *ceClient) error { - if fn == nil { - return fmt.Errorf("client option was given an nil event defaulter") - } - c.eventDefaulterFns = append(c.eventDefaulterFns, fn) - return nil - } -} - -// WithUUIDs adds DefaultIDToUUIDIfNotSet event defaulter to the end of the -// defaulter chain. -func WithUUIDs() Option { - return func(c *ceClient) error { - c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultIDToUUIDIfNotSet) - return nil - } -} - -// WithTimeNow adds DefaultTimeToNowIfNotSet event defaulter to the end of the -// defaulter chain. -func WithTimeNow() Option { - return func(c *ceClient) error { - c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet) - return nil - } -} - -// WithConverterFn defines the function the transport will use to delegate -// conversion of non-decodable messages. -func WithConverterFn(fn ConvertFn) Option { - return func(c *ceClient) error { - if fn == nil { - return fmt.Errorf("client option was given an nil message converter") - } - if c.transport.HasConverter() { - return fmt.Errorf("transport converter already set") - } - c.convertFn = fn - c.transport.SetConverter(c) - return nil - } -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/receiver.go deleted file mode 100644 index 9734341d43..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/receiver.go +++ /dev/null @@ -1,193 +0,0 @@ -package client - -import ( - "context" - "errors" - "fmt" - "reflect" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" -) - -// Receive is the signature of a fn to be invoked for incoming cloudevents. -// If fn returns an error, EventResponse will not be considered by the client or -// or transport. -// This is just an FYI: -type ReceiveFull func(context.Context, cloudevents.Event, *cloudevents.EventResponse) error - -type receiverFn struct { - numIn int - fnValue reflect.Value - - hasContextIn bool - hasEventIn bool - hasEventResponseIn bool - - hasErrorOut bool -} - -// ConvertFn defines the signature the client expects to enable conversion -// delegation. -type ConvertFn func(context.Context, transport.Message, error) (*cloudevents.Event, error) - -const ( - inParamUsage = "expected a function taking either no parameters, one or more of (context.Context, cloudevents.Event, *cloudevents.EventResponse) ordered" - outParamUsage = "expected a function returning either nothing or an error" -) - -var ( - contextType = reflect.TypeOf((*context.Context)(nil)).Elem() - eventType = reflect.TypeOf((*cloudevents.Event)(nil)).Elem() - eventResponseType = reflect.TypeOf((*cloudevents.EventResponse)(nil)) // want the ptr type - errorType = reflect.TypeOf((*error)(nil)).Elem() -) - -// receiver creates a receiverFn wrapper class that is used by the client to -// validate and invoke the provided function. 
-// Valid fn signatures are: -// * func() -// * func() error -// * func(context.Context) -// * func(context.Context) error -// * func(cloudevents.Event) -// * func(cloudevents.Event) error -// * func(context.Context, cloudevents.Event) -// * func(context.Context, cloudevents.Event) error -// * func(cloudevents.Event, *cloudevents.EventResponse) -// * func(cloudevents.Event, *cloudevents.EventResponse) error -// * func(context.Context, cloudevents.Event, *cloudevents.EventResponse) -// * func(context.Context, cloudevents.Event, *cloudevents.EventResponse) error -// -func receiver(fn interface{}) (*receiverFn, error) { - fnType := reflect.TypeOf(fn) - if fnType.Kind() != reflect.Func { - return nil, errors.New("must pass a function to handle events") - } - - r := &receiverFn{ - fnValue: reflect.ValueOf(fn), - numIn: fnType.NumIn(), - } - if err := r.validate(fnType); err != nil { - return nil, err - } - - return r, nil -} - -func (r *receiverFn) invoke(ctx context.Context, event cloudevents.Event, resp *cloudevents.EventResponse) error { - args := make([]reflect.Value, 0, r.numIn) - - if r.numIn > 0 { - if r.hasContextIn { - args = append(args, reflect.ValueOf(ctx)) - } - if r.hasEventIn { - args = append(args, reflect.ValueOf(event)) - } - if r.hasEventResponseIn { - args = append(args, reflect.ValueOf(resp)) - } - } - v := r.fnValue.Call(args) - if r.hasErrorOut && len(v) >= 1 { - if err, ok := v[0].Interface().(error); ok { - return err - } - } - return nil -} - -// Verifies that the inputs to a function have a valid signature -// Valid input is to be [0, all] of -// context.Context, cloudevents.Event, *cloudevents.EventResponse in this order. -func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { - r.hasContextIn = false - r.hasEventIn = false - r.hasEventResponseIn = false - - switch fnType.NumIn() { - case 3: - // has to be cloudevents.Event, *cloudevents.EventResponse - if !fnType.In(2).ConvertibleTo(eventResponseType) { - return fmt.Errorf("%s; cannot convert parameter 2 from %s to *cloudevents.EventResponse", inParamUsage, fnType.In(2)) - } else { - r.hasEventResponseIn = true - } - fallthrough - case 2: - // can be cloudevents.Event or *cloudevents.EventResponse - if !fnType.In(1).ConvertibleTo(eventResponseType) { - if !fnType.In(1).ConvertibleTo(eventType) { - return fmt.Errorf("%s; cannot convert parameter 1 from %s to cloudevents.Event or *cloudevents.EventResponse", inParamUsage, fnType.In(1)) - } else { - r.hasEventIn = true - } - } else if r.hasEventResponseIn { - return fmt.Errorf("%s; duplicate parameter of type *cloudevents.EventResponse", inParamUsage) - } else { - r.hasEventResponseIn = true - } - fallthrough - case 1: - if !fnType.In(0).ConvertibleTo(contextType) { - if !fnType.In(0).ConvertibleTo(eventResponseType) { - if !fnType.In(0).ConvertibleTo(eventType) { - return fmt.Errorf("%s; cannot convert parameter 0 from %s to context.Context, cloudevents.Event or *cloudevents.EventResponse", inParamUsage, fnType.In(0)) - } else if r.hasEventIn { - return fmt.Errorf("%s; duplicate parameter of type cloudevents.Event", inParamUsage) - } else { - r.hasEventIn = true - } - } else if r.hasEventResponseIn { - return fmt.Errorf("%s; duplicate parameter of type *cloudevents.EventResponse", inParamUsage) - } else if r.hasEventIn { - return fmt.Errorf("%s; out of order parameter 0 for %s", inParamUsage, fnType.In(1)) - } else { - r.hasEventResponseIn = true - } - } else { - r.hasContextIn = true - } - fallthrough - case 0: - return nil - default: - return 
fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn()) - } -} - -// Verifies that the outputs of a function have a valid signature -// Valid output signatures: -// (), (error) -func (r *receiverFn) validateOutParamSignature(fnType reflect.Type) error { - r.hasErrorOut = false - switch fnType.NumOut() { - case 1: - paramNo := fnType.NumOut() - 1 - paramType := fnType.Out(paramNo) - if !paramType.ConvertibleTo(errorType) { - return fmt.Errorf("%s; cannot convert return type %d from %s to error", outParamUsage, paramNo, paramType) - } else { - r.hasErrorOut = true - } - fallthrough - case 0: - return nil - default: - return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut()) - } -} - -// validateReceiverFn validates that a function has the right number of in and -// out params and that they are of allowed types. -func (r *receiverFn) validate(fnType reflect.Type) error { - if err := r.validateInParamSignature(fnType); err != nil { - return err - } - if err := r.validateOutParamSignature(fnType); err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/data.go deleted file mode 100644 index 926c344fed..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/data.go +++ /dev/null @@ -1,97 +0,0 @@ -package json - -import ( - "context" - "encoding/json" - "fmt" - "reflect" - "strconv" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" -) - -// Decode takes `in` as []byte, or base64 string, normalizes in to unquoted and -// base64 decoded []byte if required, and then attempts to use json.Unmarshal -// to convert those bytes to `out`. Returns and error if this process fails. -func Decode(ctx context.Context, in, out interface{}) error { - _, r := observability.NewReporter(ctx, reportDecode) - err := obsDecode(ctx, in, out) - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} - -func obsDecode(ctx context.Context, in, out interface{}) error { - if in == nil { - return nil - } - if out == nil { - return fmt.Errorf("out is nil") - } - - b, ok := in.([]byte) // TODO: I think there is fancy marshaling happening here. Fix with reflection? - if !ok { - var err error - b, err = json.Marshal(in) - if err != nil { - return fmt.Errorf("[json] failed to marshal in: %s", err.Error()) - } - } - - // TODO: the spec says json could be just data... At the moment we expect wrapped. - if len(b) > 1 && (b[0] == byte('"') || (b[0] == byte('\\') && b[1] == byte('"'))) { - s, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("[json] failed to unquote in: %s", err.Error()) - } - if len(s) > 0 && (s[0] == '{' || s[0] == '[') { - // looks like json, use it - b = []byte(s) - } - } - - if err := json.Unmarshal(b, out); err != nil { - return fmt.Errorf("[json] found bytes \"%s\", but failed to unmarshal: %s", string(b), err.Error()) - } - return nil -} - -// Encode attempts to json.Marshal `in` into bytes. Encode will inspect `in` -// and returns `in` unmodified if it is detected that `in` is already a []byte; -// Or json.Marshal errors. 
-func Encode(ctx context.Context, in interface{}) ([]byte, error) { - _, r := observability.NewReporter(ctx, reportEncode) - b, err := obsEncode(ctx, in) - if err != nil { - r.Error() - } else { - r.OK() - } - return b, err -} - -func obsEncode(ctx context.Context, in interface{}) ([]byte, error) { - if in == nil { - return nil, nil - } - - it := reflect.TypeOf(in) - switch it.Kind() { - case reflect.Slice: - if it.Elem().Kind() == reflect.Uint8 { - - if b, ok := in.([]byte); ok && len(b) > 0 { - // check to see if it is a pre-encoded byte string. - if b[0] == byte('"') || b[0] == byte('{') || b[0] == byte('[') { - return b, nil - } - } - - } - } - - return json.Marshal(in) -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/data.go deleted file mode 100644 index 6339e44433..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/data.go +++ /dev/null @@ -1,90 +0,0 @@ -package xml - -import ( - "context" - "encoding/base64" - "encoding/xml" - "fmt" - "strconv" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" -) - -// Decode takes `in` as []byte, or base64 string, normalizes in to unquoted and -// base64 decoded []byte if required, and then attempts to use xml.Unmarshal -// to convert those bytes to `out`. Returns and error if this process fails. -func Decode(ctx context.Context, in, out interface{}) error { - _, r := observability.NewReporter(ctx, reportDecode) - err := obsDecode(ctx, in, out) - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} - -func obsDecode(ctx context.Context, in, out interface{}) error { - if in == nil { - return nil - } - - b, ok := in.([]byte) - if !ok { - var err error - b, err = xml.Marshal(in) - if err != nil { - return fmt.Errorf("[xml] failed to marshal in: %s", err.Error()) - } - } - - // If the message is encoded as a base64 block as a string, we need to - // decode that first before trying to unmarshal the bytes - if len(b) > 1 && (b[0] == byte('"') || (b[0] == byte('\\') && b[1] == byte('"'))) { - s, err := strconv.Unquote(string(b)) - if err != nil { - return fmt.Errorf("[xml] failed to unquote quoted data: %s", err.Error()) - } - if len(s) > 0 && s[0] == '<' { - // looks like xml, use it - b = []byte(s) - } else if len(s) > 0 { - // looks like base64, decode - bs, err := base64.StdEncoding.DecodeString(s) - if err != nil { - return fmt.Errorf("[xml] failed to decode base64 encoded string: %s", err.Error()) - } - b = bs - } - } - - if err := xml.Unmarshal(b, out); err != nil { - return fmt.Errorf("[xml] found bytes, but failed to unmarshal: %s %s", err.Error(), string(b)) - } - return nil -} - -// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in` -// and returns `in` unmodified if it is detected that `in` is already a []byte; -// Or xml.Marshal errors. -func Encode(ctx context.Context, in interface{}) ([]byte, error) { - _, r := observability.NewReporter(ctx, reportEncode) - b, err := obsEncode(ctx, in) - if err != nil { - r.Error() - } else { - r.OK() - } - return b, err -} - -func obsEncode(ctx context.Context, in interface{}) ([]byte, error) { - if b, ok := in.([]byte); ok { - // check to see if it is a pre-encoded byte string. 
- if len(b) > 0 && b[0] == byte('"') { - return b, nil - } - } - - return xml.Marshal(in) -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go deleted file mode 100644 index e00b9dce3b..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event.go +++ /dev/null @@ -1,108 +0,0 @@ -package cloudevents - -import ( - "bytes" - "encoding/json" - "fmt" - "strings" -) - -// Event represents the canonical representation of a CloudEvent. -type Event struct { - Context EventContext - Data interface{} - DataEncoded bool - DataBinary bool -} - -const ( - defaultEventVersion = CloudEventsVersionV02 -) - -// New returns a new Event, an optional version can be passed to change the -// default spec version from 0.2 to the provided version. -func New(version ...string) Event { - specVersion := defaultEventVersion // TODO: should there be a default? or set a default? - if len(version) >= 1 { - specVersion = version[0] - } - e := &Event{} - e.SetSpecVersion(specVersion) - return *e -} - -// DEPRECATED: Access extensions directly via the e.Extensions() map. -// Use functions in the types package to convert extension values. -// For example replace this: -// -// var i int -// err := e.ExtensionAs("foo", &i) -// -// With this: -// -// i, err := types.ToInteger(e.Extensions["foo"]) -// -func (e Event) ExtensionAs(name string, obj interface{}) error { - return e.Context.ExtensionAs(name, obj) -} - -// Validate performs a spec based validation on this event. -// Validation is dependent on the spec version specified in the event context. -func (e Event) Validate() error { - if e.Context == nil { - return fmt.Errorf("every event conforming to the CloudEvents specification MUST include a context") - } - - if err := e.Context.Validate(); err != nil { - return err - } - - // TODO: validate data. - - return nil -} - -// String returns a pretty-printed representation of the Event. -func (e Event) String() string { - b := strings.Builder{} - - b.WriteString("Validation: ") - - valid := e.Validate() - if valid == nil { - b.WriteString("valid\n") - } else { - b.WriteString("invalid\n") - } - if valid != nil { - b.WriteString(fmt.Sprintf("Validation Error: \n%s\n", valid.Error())) - } - - b.WriteString(e.Context.String()) - - if e.Data != nil { - b.WriteString("Data,\n ") - if strings.HasPrefix(e.DataContentType(), ApplicationJSON) { - var prettyJSON bytes.Buffer - - data, ok := e.Data.([]byte) - if !ok { - var err error - data, err = json.Marshal(e.Data) - if err != nil { - data = []byte(err.Error()) - } - } - err := json.Indent(&prettyJSON, data, " ", " ") - if err != nil { - b.Write(e.Data.([]byte)) - } else { - b.Write(prettyJSON.Bytes()) - } - } else { - b.Write(e.Data.([]byte)) - } - b.WriteString("\n") - } - return b.String() -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go deleted file mode 100644 index 26ce70ea33..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_data.go +++ /dev/null @@ -1,135 +0,0 @@ -package cloudevents - -import ( - "context" - "encoding/base64" - "errors" - "fmt" - "strconv" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec" -) - -// Data is special. Break it out into it's own file. 
- -// SetData implements EventWriter.SetData -func (e *Event) SetData(obj interface{}) error { - if e.SpecVersion() != CloudEventsVersionV1 { - return e.legacySetData(obj) - } - - // Version 1.0 and above. - - // TODO: we will have to be smarter about how data relates to media type. - // but the issue is we can not just encode data anymore without understanding - // what the encoding will be on the outbound event. Structured will use - // data_base64, binary will not (if the transport supports binary mode). - - // TODO: look at content encoding too. - - switch obj.(type) { - case []byte: - e.Data = obj - e.DataEncoded = true - e.DataBinary = true - default: - data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) - if err != nil { - return err - } - e.Data = data - e.DataEncoded = true - e.DataBinary = false - } - - return nil -} - -func (e *Event) legacySetData(obj interface{}) error { - data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) - if err != nil { - return err - } - if e.DeprecatedDataContentEncoding() == Base64 { - buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) - base64.StdEncoding.Encode(buf, data) - e.Data = string(buf) - } else { - e.Data = data - } - e.DataEncoded = true - return nil -} - -func (e *Event) DataBytes() ([]byte, error) { - if !e.DataEncoded { - if err := e.SetData(e.Data); err != nil { - return nil, err - } - } - - b, ok := e.Data.([]byte) - if !ok { - if s, ok := e.Data.(string); ok { - b = []byte(s) - } else { - // No data. - return []byte(nil), nil - } - } - return b, nil -} - -const ( - quotes = `"'` -) - -// DataAs attempts to populate the provided data object with the event payload. -// data should be a pointer type. -func (e Event) DataAs(data interface{}) error { // TODO: Clean this function up - if e.Data == nil { - return nil - } - obj, ok := e.Data.([]byte) - if !ok { - if s, ok := e.Data.(string); ok { - obj = []byte(s) - } else { - return errors.New("data was not a byte slice or string") - } - } - if len(obj) == 0 { - // No data. - return nil - } - if e.Context.DeprecatedGetDataContentEncoding() == Base64 { - var bs []byte - // test to see if we need to unquote the data. - if obj[0] == quotes[0] || obj[0] == quotes[1] { - str, err := strconv.Unquote(string(obj)) - if err != nil { - return err - } - bs = []byte(str) - } else { - bs = obj - } - - buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs))) - n, err := base64.StdEncoding.Decode(buf, bs) - if err != nil { - return fmt.Errorf("failed to decode data from base64: %s", err.Error()) - } - obj = buf[:n] - } - - mediaType := "" - if e.Context.GetDataContentType() != "" { - var err error - mediaType, err = e.Context.GetDataMediaType() - if err != nil { - return err - } - } - return datacodec.Decode(context.Background(), mediaType, obj, data) -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_response.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_response.go deleted file mode 100644 index 0e5f7ce75d..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_response.go +++ /dev/null @@ -1,37 +0,0 @@ -package cloudevents - -// EventResponse represents the canonical representation of a Response to a -// CloudEvent from a receiver. Response implementation is Transport dependent. -type EventResponse struct { - Status int - Event *Event - Reason string - // Context is transport specific struct to allow for controlling transport - // response details. 
- // For example, see http.TransportResponseContext. - Context interface{} -} - -// RespondWith sets up the instance of EventResponse to be set with status and -// an event. Response implementation is Transport dependent. -func (e *EventResponse) RespondWith(status int, event *Event) { - if e == nil { - // if nil, response not supported - return - } - e.Status = status - if event != nil { - e.Event = event - } -} - -// Error sets the instance of EventResponse to be set with an error code and -// reason string. Response implementation is Transport dependent. -func (e *EventResponse) Error(status int, reason string) { - if e == nil { - // if nil, response not supported - return - } - e.Status = status - e.Reason = reason -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go deleted file mode 100644 index 0b01823beb..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01.go +++ /dev/null @@ -1,272 +0,0 @@ -package cloudevents - -import ( - "fmt" - "sort" - "strings" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" -) - -const ( - // CloudEventsVersionV01 represents the version 0.1 of the CloudEvents spec. - CloudEventsVersionV01 = "0.1" -) - -// EventContextV01 holds standard metadata about an event. See -// https://github.com/cloudevents/spec/blob/v0.1/spec.md#context-attributes for -// details on these fields. -type EventContextV01 struct { - // The version of the CloudEvents specification used by the event. - CloudEventsVersion string `json:"cloudEventsVersion,omitempty"` - // ID of the event; must be non-empty and unique within the scope of the producer. - EventID string `json:"eventID"` - // Timestamp when the event happened. - EventTime *types.Timestamp `json:"eventTime,omitempty"` - // Type of occurrence which has happened. - EventType string `json:"eventType"` - // The version of the `eventType`; this is producer-specific. - EventTypeVersion *string `json:"eventTypeVersion,omitempty"` - // A link to the schema that the `data` attribute adheres to. - SchemaURL *types.URLRef `json:"schemaURL,omitempty"` - // A MIME (RFC 2046) string describing the media type of `data`. - // TODO: Should an empty string assume `application/json`, or auto-detect the content? - ContentType *string `json:"contentType,omitempty"` - // A URI describing the event producer. - Source types.URLRef `json:"source"` - // Additional metadata without a well-defined structure. - Extensions map[string]interface{} `json:"extensions,omitempty"` -} - -// Adhere to EventContext -var _ EventContext = (*EventContextV01)(nil) - -// ExtensionAs implements EventContextReader.ExtensionAs -func (ec EventContextV01) ExtensionAs(name string, obj interface{}) error { - value, ok := ec.Extensions[name] - if !ok { - return fmt.Errorf("extension %q does not exist", name) - } - // Only support *string for now. - switch v := obj.(type) { - case *string: - if valueAsString, ok := value.(string); ok { - *v = valueAsString - return nil - } else { - return fmt.Errorf("invalid type for extension %q", name) - } - default: - return fmt.Errorf("unknown extension type %T", obj) - } -} - -// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. 
-func (ec *EventContextV01) SetExtension(name string, value interface{}) error { - if ec.Extensions == nil { - ec.Extensions = make(map[string]interface{}) - } - if value == nil { - delete(ec.Extensions, name) - } else { - ec.Extensions[name] = value - } - return nil -} - -// Clone implements EventContextConverter.Clone -func (ec EventContextV01) Clone() EventContext { - return ec.AsV01() -} - -// AsV01 implements EventContextConverter.AsV01 -func (ec EventContextV01) AsV01() *EventContextV01 { - ec.CloudEventsVersion = CloudEventsVersionV01 - return &ec -} - -// AsV02 implements EventContextConverter.AsV02 -func (ec EventContextV01) AsV02() *EventContextV02 { - ret := EventContextV02{ - SpecVersion: CloudEventsVersionV02, - Type: ec.EventType, - Source: ec.Source, - ID: ec.EventID, - Time: ec.EventTime, - SchemaURL: ec.SchemaURL, - ContentType: ec.ContentType, - Extensions: make(map[string]interface{}), - } - - // eventTypeVersion was retired in v0.2, so put it in an extension. - if ec.EventTypeVersion != nil { - _ = ret.SetExtension(EventTypeVersionKey, *ec.EventTypeVersion) - } - if ec.Extensions != nil { - for k, v := range ec.Extensions { - ret.Extensions[k] = v - } - } - if len(ret.Extensions) == 0 { - ret.Extensions = nil - } - return &ret -} - -// AsV03 implements EventContextConverter.AsV03 -func (ec EventContextV01) AsV03() *EventContextV03 { - return ec.AsV02().AsV03() -} - -// AsV1 implements EventContextConverter.AsV1 -func (ec EventContextV01) AsV1() *EventContextV1 { - return ec.AsV02().AsV03().AsV1() -} - -// Validate returns errors based on requirements from the CloudEvents spec. -// For more details, see https://github.com/cloudevents/spec/blob/v0.1/spec.md -func (ec EventContextV01) Validate() error { - errors := []string(nil) - - // eventType - // Type: String - // Constraints: - // REQUIRED - // MUST be a non-empty string - // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. - eventType := strings.TrimSpace(ec.EventType) - if eventType == "" { - errors = append(errors, "eventType: MUST be a non-empty string") - } - - // eventTypeVersion - // Type: String - // Constraints: - // OPTIONAL - // If present, MUST be a non-empty string - if ec.EventTypeVersion != nil { - eventTypeVersion := strings.TrimSpace(*ec.EventTypeVersion) - if eventTypeVersion == "" { - errors = append(errors, "eventTypeVersion: if present, MUST be a non-empty string") - } - } - - // cloudEventsVersion - // Type: String - // Constraints: - // REQUIRED - // MUST be a non-empty string - cloudEventsVersion := strings.TrimSpace(ec.CloudEventsVersion) - if cloudEventsVersion == "" { - errors = append(errors, "cloudEventsVersion: MUST be a non-empty string") - } - - // source - // Type: URI - // Constraints: - // REQUIRED - source := strings.TrimSpace(ec.Source.String()) - if source == "" { - errors = append(errors, "source: REQUIRED") - } - - // eventID - // Type: String - // Constraints: - // REQUIRED - // MUST be a non-empty string - // MUST be unique within the scope of the producer - eventID := strings.TrimSpace(ec.EventID) - if eventID == "" { - errors = append(errors, "eventID: MUST be a non-empty string") - - // no way to test "MUST be unique within the scope of the producer" - } - - // eventTime - // Type: Timestamp - // Constraints: - // OPTIONAL - // If present, MUST adhere to the format specified in RFC 3339 - // --> no need to test this, no way to set the eventTime without it being valid. 
- - // schemaURL - // Type: URI - // Constraints: - // OPTIONAL - // If present, MUST adhere to the format specified in RFC 3986 - if ec.SchemaURL != nil { - schemaURL := strings.TrimSpace(ec.SchemaURL.String()) - // empty string is not RFC 3986 compatible. - if schemaURL == "" { - errors = append(errors, "schemaURL: if present, MUST adhere to the format specified in RFC 3986") - } - } - - // contentType - // Type: String per RFC 2046 - // Constraints: - // OPTIONAL - // If present, MUST adhere to the format specified in RFC 2046 - if ec.ContentType != nil { - contentType := strings.TrimSpace(*ec.ContentType) - if contentType == "" { - // TODO: need to test for RFC 2046 - errors = append(errors, "contentType: if present, MUST adhere to the format specified in RFC 2046") - } - } - - // extensions - // Type: Map - // Constraints: - // OPTIONAL - // If present, MUST contain at least one entry - if ec.Extensions != nil { - if len(ec.Extensions) == 0 { - errors = append(errors, "extensions: if present, MUST contain at least one entry") - } - } - - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil -} - -// String returns a pretty-printed representation of the EventContext. -func (ec EventContextV01) String() string { - b := strings.Builder{} - - b.WriteString("Context Attributes,\n") - - b.WriteString(" cloudEventsVersion: " + ec.CloudEventsVersion + "\n") - b.WriteString(" eventType: " + ec.EventType + "\n") - if ec.EventTypeVersion != nil { - b.WriteString(" eventTypeVersion: " + *ec.EventTypeVersion + "\n") - } - b.WriteString(" source: " + ec.Source.String() + "\n") - b.WriteString(" eventID: " + ec.EventID + "\n") - if ec.EventTime != nil { - b.WriteString(" eventTime: " + ec.EventTime.String() + "\n") - } - if ec.SchemaURL != nil { - b.WriteString(" schemaURL: " + ec.SchemaURL.String() + "\n") - } - if ec.ContentType != nil { - b.WriteString(" contentType: " + *ec.ContentType + "\n") - } - - if ec.Extensions != nil && len(ec.Extensions) > 0 { - b.WriteString("Extensions,\n") - keys := make([]string, 0, len(ec.Extensions)) - for k := range ec.Extensions { - keys = append(keys, k) - } - sort.Strings(keys) - for _, key := range keys { - b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) - } - } - - return b.String() -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go deleted file mode 100644 index 8d75ea70c4..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_reader.go +++ /dev/null @@ -1,101 +0,0 @@ -package cloudevents - -import ( - "fmt" - "mime" - "time" -) - -// Adhere to EventContextReader -var _ EventContextReader = (*EventContextV01)(nil) - -// GetSpecVersion implements EventContextReader.GetSpecVersion -func (ec EventContextV01) GetSpecVersion() string { - if ec.CloudEventsVersion != "" { - return ec.CloudEventsVersion - } - return CloudEventsVersionV01 -} - -// GetDataContentType implements EventContextReader.GetDataContentType -func (ec EventContextV01) GetDataContentType() string { - if ec.ContentType != nil { - return *ec.ContentType - } - return "" -} - -// GetDataMediaType implements EventContextReader.GetDataMediaType -func (ec EventContextV01) GetDataMediaType() (string, error) { - if ec.ContentType != nil { - mediaType, _, err := mime.ParseMediaType(*ec.ContentType) - if err != nil { - return "", err - } - return mediaType, nil - } - return 
"", nil -} - -// GetType implements EventContextReader.GetType -func (ec EventContextV01) GetType() string { - return ec.EventType -} - -// GetSource implements EventContextReader.GetSource -func (ec EventContextV01) GetSource() string { - return ec.Source.String() -} - -// GetSubject implements EventContextReader.GetSubject -func (ec EventContextV01) GetSubject() string { - var sub string - if err := ec.ExtensionAs(SubjectKey, &sub); err != nil { - return "" - } - return sub -} - -// GetID implements EventContextReader.GetID -func (ec EventContextV01) GetID() string { - return ec.EventID -} - -// GetTime implements EventContextReader.GetTime -func (ec EventContextV01) GetTime() time.Time { - if ec.EventTime != nil { - return ec.EventTime.Time - } - return time.Time{} -} - -// GetDataSchema implements EventContextReader.GetDataSchema -func (ec EventContextV01) GetDataSchema() string { - if ec.SchemaURL != nil { - return ec.SchemaURL.String() - } - return "" -} - -// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding -func (ec EventContextV01) DeprecatedGetDataContentEncoding() string { - var enc string - if err := ec.ExtensionAs(DataContentEncodingKey, &enc); err != nil { - return "" - } - return enc -} - -// GetExtensions implements EventContextReader.GetExtensions -func (ec EventContextV01) GetExtensions() map[string]interface{} { - return ec.Extensions -} - -// GetExtension implements EventContextReader.GetExtension -func (ec EventContextV01) GetExtension(key string) (interface{}, error) { - v, ok := caseInsensitiveSearch(key, ec.Extensions) - if !ok { - return "", fmt.Errorf("%q not found", key) - } - return v, nil -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go deleted file mode 100644 index e49d3cca72..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v01_writer.go +++ /dev/null @@ -1,104 +0,0 @@ -package cloudevents - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" -) - -// Adhere to EventContextWriter -var _ EventContextWriter = (*EventContextV01)(nil) - -// SetSpecVersion implements EventContextWriter.SetSpecVersion -func (ec *EventContextV01) SetSpecVersion(v string) error { - if v != CloudEventsVersionV01 { - return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV01) - } - ec.CloudEventsVersion = CloudEventsVersionV01 - return nil -} - -// SetDataContentType implements EventContextWriter.SetDataContentType -func (ec *EventContextV01) SetDataContentType(ct string) error { - ct = strings.TrimSpace(ct) - if ct == "" { - ec.ContentType = nil - } else { - ec.ContentType = &ct - } - return nil -} - -// SetType implements EventContextWriter.SetType -func (ec *EventContextV01) SetType(t string) error { - t = strings.TrimSpace(t) - ec.EventType = t - return nil -} - -// SetSource implements EventContextWriter.SetSource -func (ec *EventContextV01) SetSource(u string) error { - pu, err := url.Parse(u) - if err != nil { - return err - } - ec.Source = types.URLRef{URL: *pu} - return nil -} - -// SetSubject implements EventContextWriter.SetSubject -func (ec *EventContextV01) SetSubject(s string) error { - s = strings.TrimSpace(s) - if s == "" { - return ec.SetExtension(SubjectKey, nil) - } - return ec.SetExtension(SubjectKey, s) -} - -// SetID implements 
EventContextWriter.SetID -func (ec *EventContextV01) SetID(id string) error { - id = strings.TrimSpace(id) - if id == "" { - return errors.New("event id is required to be a non-empty string") - } - ec.EventID = id - return nil -} - -// SetTime implements EventContextWriter.SetTime -func (ec *EventContextV01) SetTime(t time.Time) error { - if t.IsZero() { - ec.EventTime = nil - } else { - ec.EventTime = &types.Timestamp{Time: t} - } - return nil -} - -// SetDataSchema implements EventContextWriter.SetDataSchema -func (ec *EventContextV01) SetDataSchema(u string) error { - u = strings.TrimSpace(u) - if u == "" { - ec.SchemaURL = nil - return nil - } - pu, err := url.Parse(u) - if err != nil { - return err - } - ec.SchemaURL = &types.URLRef{URL: *pu} - return nil -} - -// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding -func (ec *EventContextV01) DeprecatedSetDataContentEncoding(e string) error { - e = strings.ToLower(strings.TrimSpace(e)) - if e == "" { - return ec.SetExtension(DataContentEncodingKey, nil) - } - return ec.SetExtension(DataContentEncodingKey, e) -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go deleted file mode 100644 index 3dde8a19b1..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02.go +++ /dev/null @@ -1,291 +0,0 @@ -package cloudevents - -import ( - "encoding/json" - "fmt" - "sort" - "strings" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" -) - -const ( - // CloudEventsVersionV02 represents the version 0.2 of the CloudEvents spec. - CloudEventsVersionV02 = "0.2" -) - -// EventContextV02 represents the non-data attributes of a CloudEvents v0.2 -// event. -type EventContextV02 struct { - // The version of the CloudEvents specification used by the event. - SpecVersion string `json:"specversion"` - // The type of the occurrence which has happened. - Type string `json:"type"` - // A URI describing the event producer. - Source types.URLRef `json:"source"` - // ID of the event; must be non-empty and unique within the scope of the producer. - ID string `json:"id"` - // Timestamp when the event happened. - Time *types.Timestamp `json:"time,omitempty"` - // A link to the schema that the `data` attribute adheres to. - SchemaURL *types.URLRef `json:"schemaurl,omitempty"` - // A MIME (RFC2046) string describing the media type of `data`. - // TODO: Should an empty string assume `application/json`, `application/octet-stream`, or auto-detect the content? - ContentType *string `json:"contenttype,omitempty"` - // Additional extension metadata beyond the base spec. - Extensions map[string]interface{} `json:"-"` -} - -// Adhere to EventContext -var _ EventContext = (*EventContextV02)(nil) - -// ExtensionAs implements EventContext.ExtensionAs -func (ec EventContextV02) ExtensionAs(name string, obj interface{}) error { - value, ok := ec.Extensions[name] - if !ok { - return fmt.Errorf("extension %q does not exist", name) - } - - // Try to unmarshal extension if we find it as a RawMessage. - switch v := value.(type) { - case json.RawMessage: - if err := json.Unmarshal(v, obj); err == nil { - // if that worked, return with obj set. - return nil - } - } - // else try as a string ptr. - - // Only support *string for now. 
- switch v := obj.(type) { - case *string: - if valueAsString, ok := value.(string); ok { - *v = valueAsString - return nil - } else { - return fmt.Errorf("invalid type for extension %q", name) - } - default: - return fmt.Errorf("unknown extension type %T", obj) - } -} - -// SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. -func (ec *EventContextV02) SetExtension(name string, value interface{}) error { - if ec.Extensions == nil { - ec.Extensions = make(map[string]interface{}) - } - if value == nil { - delete(ec.Extensions, name) - } else { - ec.Extensions[name] = value - } - return nil -} - -// Clone implements EventContextConverter.Clone -func (ec EventContextV02) Clone() EventContext { - return ec.AsV02() -} - -// AsV01 implements EventContextConverter.AsV01 -func (ec EventContextV02) AsV01() *EventContextV01 { - ret := EventContextV01{ - CloudEventsVersion: CloudEventsVersionV01, - EventID: ec.ID, - EventTime: ec.Time, - EventType: ec.Type, - SchemaURL: ec.SchemaURL, - Source: ec.Source, - ContentType: ec.ContentType, - Extensions: make(map[string]interface{}), - } - - for k, v := range ec.Extensions { - // eventTypeVersion was retired in v0.2 - if strings.EqualFold(k, EventTypeVersionKey) { - etv, ok := v.(string) - if ok && etv != "" { - ret.EventTypeVersion = &etv - } - continue - } - ret.Extensions[k] = v - } - if len(ret.Extensions) == 0 { - ret.Extensions = nil - } - return &ret -} - -// AsV02 implements EventContextConverter.AsV02 -func (ec EventContextV02) AsV02() *EventContextV02 { - ec.SpecVersion = CloudEventsVersionV02 - return &ec -} - -// AsV03 implements EventContextConverter.AsV03 -func (ec EventContextV02) AsV03() *EventContextV03 { - ret := EventContextV03{ - SpecVersion: CloudEventsVersionV03, - ID: ec.ID, - Time: ec.Time, - Type: ec.Type, - SchemaURL: ec.SchemaURL, - DataContentType: ec.ContentType, - Source: ec.Source, - Extensions: make(map[string]interface{}), - } - - for k, v := range ec.Extensions { - // Subject was introduced in 0.3 - if strings.EqualFold(k, SubjectKey) { - sub, ok := v.(string) - if ok && sub != "" { - ret.Subject = &sub - } - continue - } - // DeprecatedDataContentEncoding was introduced in 0.3 - if strings.EqualFold(k, DataContentEncodingKey) { - etv, ok := v.(string) - if ok && etv != "" { - ret.DataContentEncoding = &etv - } - continue - } - ret.Extensions[k] = v - } - if len(ret.Extensions) == 0 { - ret.Extensions = nil - } - - return &ret -} - -// AsV1 implements EventContextConverter.AsV1 -func (ec EventContextV02) AsV1() *EventContextV1 { - return ec.AsV03().AsV1() -} - -// Validate returns errors based on requirements from the CloudEvents spec. -// For more details, see https://github.com/cloudevents/spec/blob/v0.2/spec.md -func (ec EventContextV02) Validate() error { - errors := []string(nil) - - // type - // Type: String - // Constraints: - // REQUIRED - // MUST be a non-empty string - // SHOULD be prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type. 
- eventType := strings.TrimSpace(ec.Type) - if eventType == "" { - errors = append(errors, "type: MUST be a non-empty string") - } - - // specversion - // Type: String - // Constraints: - // REQUIRED - // MUST be a non-empty string - specVersion := strings.TrimSpace(ec.SpecVersion) - if specVersion == "" { - errors = append(errors, "specversion: MUST be a non-empty string") - } - - // source - // Type: URI-reference - // Constraints: - // REQUIRED - source := strings.TrimSpace(ec.Source.String()) - if source == "" { - errors = append(errors, "source: REQUIRED") - } - - // id - // Type: String - // Constraints: - // REQUIRED - // MUST be a non-empty string - // MUST be unique within the scope of the producer - id := strings.TrimSpace(ec.ID) - if id == "" { - errors = append(errors, "id: MUST be a non-empty string") - - // no way to test "MUST be unique within the scope of the producer" - } - - // time - // Type: Timestamp - // Constraints: - // OPTIONAL - // If present, MUST adhere to the format specified in RFC 3339 - // --> no need to test this, no way to set the time without it being valid. - - // schemaurl - // Type: URI - // Constraints: - // OPTIONAL - // If present, MUST adhere to the format specified in RFC 3986 - if ec.SchemaURL != nil { - schemaURL := strings.TrimSpace(ec.SchemaURL.String()) - // empty string is not RFC 3986 compatible. - if schemaURL == "" { - errors = append(errors, "schemaurl: if present, MUST adhere to the format specified in RFC 3986") - } - } - - // contenttype - // Type: String per RFC 2046 - // Constraints: - // OPTIONAL - // If present, MUST adhere to the format specified in RFC 2046 - if ec.ContentType != nil { - contentType := strings.TrimSpace(*ec.ContentType) - if contentType == "" { - // TODO: need to test for RFC 2046 - errors = append(errors, "contenttype: if present, MUST adhere to the format specified in RFC 2046") - } - } - - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil -} - -// String returns a pretty-printed representation of the EventContext. 
-func (ec EventContextV02) String() string { - b := strings.Builder{} - - b.WriteString("Context Attributes,\n") - - b.WriteString(" specversion: " + ec.SpecVersion + "\n") - b.WriteString(" type: " + ec.Type + "\n") - b.WriteString(" source: " + ec.Source.String() + "\n") - b.WriteString(" id: " + ec.ID + "\n") - if ec.Time != nil { - b.WriteString(" time: " + ec.Time.String() + "\n") - } - if ec.SchemaURL != nil { - b.WriteString(" schemaurl: " + ec.SchemaURL.String() + "\n") - } - if ec.ContentType != nil { - b.WriteString(" contenttype: " + *ec.ContentType + "\n") - } - - if ec.Extensions != nil && len(ec.Extensions) > 0 { - b.WriteString("Extensions,\n") - keys := make([]string, 0, len(ec.Extensions)) - for k := range ec.Extensions { - keys = append(keys, k) - } - sort.Strings(keys) - for _, key := range keys { - b.WriteString(fmt.Sprintf(" %s: %v\n", key, ec.Extensions[key])) - } - } - - return b.String() -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go deleted file mode 100644 index 120cdb87ec..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_reader.go +++ /dev/null @@ -1,101 +0,0 @@ -package cloudevents - -import ( - "fmt" - "mime" - "time" -) - -// Adhere to EventContextReader -var _ EventContextReader = (*EventContextV02)(nil) - -// GetSpecVersion implements EventContextReader.GetSpecVersion -func (ec EventContextV02) GetSpecVersion() string { - if ec.SpecVersion != "" { - return ec.SpecVersion - } - return CloudEventsVersionV02 -} - -// GetType implements EventContextReader.GetType -func (ec EventContextV02) GetType() string { - return ec.Type -} - -// GetSource implements EventContextReader.GetSource -func (ec EventContextV02) GetSource() string { - return ec.Source.String() -} - -// GetSubject implements EventContextReader.GetSubject -func (ec EventContextV02) GetSubject() string { - var sub string - if err := ec.ExtensionAs(SubjectKey, &sub); err != nil { - return "" - } - return sub -} - -// GetID implements EventContextReader.GetID -func (ec EventContextV02) GetID() string { - return ec.ID -} - -// GetTime implements EventContextReader.GetTime -func (ec EventContextV02) GetTime() time.Time { - if ec.Time != nil { - return ec.Time.Time - } - return time.Time{} -} - -// GetDataSchema implements EventContextReader.GetDataSchema -func (ec EventContextV02) GetDataSchema() string { - if ec.SchemaURL != nil { - return ec.SchemaURL.String() - } - return "" -} - -// GetDataContentType implements EventContextReader.GetDataContentType -func (ec EventContextV02) GetDataContentType() string { - if ec.ContentType != nil { - return *ec.ContentType - } - return "" -} - -// GetDataMediaType implements EventContextReader.GetDataMediaType -func (ec EventContextV02) GetDataMediaType() (string, error) { - if ec.ContentType != nil { - mediaType, _, err := mime.ParseMediaType(*ec.ContentType) - if err != nil { - return "", err - } - return mediaType, nil - } - return "", nil -} - -// DeprecatedGetDataContentEncoding implements EventContextReader.DeprecatedGetDataContentEncoding -func (ec EventContextV02) DeprecatedGetDataContentEncoding() string { - var enc string - if err := ec.ExtensionAs(DataContentEncodingKey, &enc); err != nil { - return "" - } - return enc -} - -// GetExtensions implements EventContextReader.GetExtensions -func (ec EventContextV02) GetExtensions() map[string]interface{} { - return 
ec.Extensions -} - -// GetExtension implements EventContextReader.GetExtension -func (ec EventContextV02) GetExtension(key string) (interface{}, error) { - v, ok := caseInsensitiveSearch(key, ec.Extensions) - if !ok { - return "", fmt.Errorf("%q not found", key) - } - return v, nil -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go deleted file mode 100644 index 25b8a16c8d..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v02_writer.go +++ /dev/null @@ -1,104 +0,0 @@ -package cloudevents - -import ( - "errors" - "fmt" - "net/url" - "strings" - "time" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" -) - -// Adhere to EventContextWriter -var _ EventContextWriter = (*EventContextV02)(nil) - -// SetSpecVersion implements EventContextWriter.SetSpecVersion -func (ec *EventContextV02) SetSpecVersion(v string) error { - if v != CloudEventsVersionV02 { - return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV02) - } - ec.SpecVersion = CloudEventsVersionV02 - return nil -} - -// SetDataContentType implements EventContextWriter.SetDataContentType -func (ec *EventContextV02) SetDataContentType(ct string) error { - ct = strings.TrimSpace(ct) - if ct == "" { - ec.ContentType = nil - } else { - ec.ContentType = &ct - } - return nil -} - -// SetType implements EventContextWriter.SetType -func (ec *EventContextV02) SetType(t string) error { - t = strings.TrimSpace(t) - ec.Type = t - return nil -} - -// SetSource implements EventContextWriter.SetSource -func (ec *EventContextV02) SetSource(u string) error { - pu, err := url.Parse(u) - if err != nil { - return err - } - ec.Source = types.URLRef{URL: *pu} - return nil -} - -// SetSubject implements EventContextWriter.SetSubject -func (ec *EventContextV02) SetSubject(s string) error { - s = strings.TrimSpace(s) - if s == "" { - return ec.SetExtension(SubjectKey, nil) - } - return ec.SetExtension(SubjectKey, s) -} - -// SetID implements EventContextWriter.SetID -func (ec *EventContextV02) SetID(id string) error { - id = strings.TrimSpace(id) - if id == "" { - return errors.New("id is required to be a non-empty string") - } - ec.ID = id - return nil -} - -// SetTime implements EventContextWriter.SetTime -func (ec *EventContextV02) SetTime(t time.Time) error { - if t.IsZero() { - ec.Time = nil - } else { - ec.Time = &types.Timestamp{Time: t} - } - return nil -} - -// SetDataSchema implements EventContextWriter.SetDataSchema -func (ec *EventContextV02) SetDataSchema(u string) error { - u = strings.TrimSpace(u) - if u == "" { - ec.SchemaURL = nil - return nil - } - pu, err := url.Parse(u) - if err != nil { - return err - } - ec.SchemaURL = &types.URLRef{URL: *pu} - return nil -} - -// DeprecatedSetDataContentEncoding implements EventContextWriter.DeprecatedSetDataContentEncoding -func (ec *EventContextV02) DeprecatedSetDataContentEncoding(e string) error { - e = strings.ToLower(strings.TrimSpace(e)) - if e == "" { - return ec.SetExtension(DataContentEncodingKey, nil) - } - return ec.SetExtension(DataContentEncodingKey, e) -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/codec.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/codec.go deleted file mode 100644 index 091064c915..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/codec.go +++ 
/dev/null @@ -1,35 +0,0 @@ -package transport - -import ( - "context" - "fmt" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" -) - -// Codec is the interface for transport codecs to convert between transport -// specific payloads and the Message interface. -type Codec interface { - Encode(context.Context, cloudevents.Event) (Message, error) - Decode(context.Context, Message) (*cloudevents.Event, error) -} - -// ErrMessageEncodingUnknown is an error produced when the encoding for an incoming -// message can not be understood. -type ErrMessageEncodingUnknown struct { - codec string - transport string -} - -// NewErrMessageEncodingUnknown makes a new ErrMessageEncodingUnknown. -func NewErrMessageEncodingUnknown(codec, transport string) *ErrMessageEncodingUnknown { - return &ErrMessageEncodingUnknown{ - codec: codec, - transport: transport, - } -} - -// Error implements error.Error -func (e *ErrMessageEncodingUnknown) Error() string { - return fmt.Sprintf("message encoding unknown for %s codec on %s transport", e.codec, e.transport) -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go deleted file mode 100644 index 9dcceda0be..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec.go +++ /dev/null @@ -1,154 +0,0 @@ -package http - -import ( - "context" - "errors" - "fmt" - "sync" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" -) - -// Codec is the wrapper for all versions of codecs supported by the http -// transport. -type Codec struct { - // Encoding is the setting to inform the DefaultEncodingSelectionFn for - // selecting a codec. - Encoding Encoding - - // DefaultEncodingSelectionFn allows for encoding selection strategies to be injected. - DefaultEncodingSelectionFn EncodingSelector - - v01 *CodecV01 - v02 *CodecV02 - v03 *CodecV03 - v1 *CodecV1 - - _v01 sync.Once - _v02 sync.Once - _v03 sync.Once - _v1 sync.Once -} - -// Adheres to Codec -var _ transport.Codec = (*Codec)(nil) - -func (c *Codec) loadCodec(encoding Encoding) (transport.Codec, error) { - switch encoding { - case Default: - fallthrough - case BinaryV01, StructuredV01: - c._v01.Do(func() { - c.v01 = &CodecV01{DefaultEncoding: c.Encoding} - }) - return c.v01, nil - case BinaryV02, StructuredV02: - c._v02.Do(func() { - c.v02 = &CodecV02{DefaultEncoding: c.Encoding} - }) - return c.v02, nil - case BinaryV03, StructuredV03, BatchedV03: - c._v03.Do(func() { - c.v03 = &CodecV03{DefaultEncoding: c.Encoding} - }) - return c.v03, nil - case BinaryV1, StructuredV1, BatchedV1: - c._v1.Do(func() { - c.v1 = &CodecV1{DefaultEncoding: c.Encoding} - }) - return c.v1, nil - } - return nil, fmt.Errorf("unknown encoding: %s", encoding) -} - -// Encode encodes the provided event into a transport message. -func (c *Codec) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - encoding := c.Encoding - if encoding == Default && c.DefaultEncodingSelectionFn != nil { - encoding = c.DefaultEncodingSelectionFn(ctx, e) - } - codec, err := c.loadCodec(encoding) - if err != nil { - return nil, err - } - ctx = cecontext.WithEncoding(ctx, encoding.Name()) - return codec.Encode(ctx, e) -} - -// Decode converts a provided transport message into an Event, or error. 
-func (c *Codec) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - codec, err := c.loadCodec(c.inspectEncoding(ctx, msg)) - if err != nil { - return nil, err - } - event, err := codec.Decode(ctx, msg) - if err != nil { - return nil, err - } - return c.convertEvent(event) -} - -// Give the context back as the user expects -func (c *Codec) convertEvent(event *cloudevents.Event) (*cloudevents.Event, error) { - if event == nil { - return nil, errors.New("event is nil, can not convert") - } - - switch c.Encoding { - case Default: - return event, nil - case BinaryV01, StructuredV01: - ca := event.Context.AsV01() - event.Context = ca - return event, nil - case BinaryV02, StructuredV02: - ca := event.Context.AsV02() - event.Context = ca - return event, nil - case BinaryV03, StructuredV03, BatchedV03: - ca := event.Context.AsV03() - event.Context = ca - return event, nil - case BinaryV1, StructuredV1, BatchedV1: - ca := event.Context.AsV03() - event.Context = ca - return event, nil - default: - return nil, fmt.Errorf("unknown encoding: %s", c.Encoding) - } -} - -func (c *Codec) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { - // Try v1.0. - _, _ = c.loadCodec(BinaryV1) - encoding := c.v1.inspectEncoding(ctx, msg) - if encoding != Unknown { - return encoding - } - - // Try v0.3. - _, _ = c.loadCodec(BinaryV03) - encoding = c.v03.inspectEncoding(ctx, msg) - if encoding != Unknown { - return encoding - } - - // Try v0.2. - _, _ = c.loadCodec(BinaryV02) - encoding = c.v02.inspectEncoding(ctx, msg) - if encoding != Unknown { - return encoding - } - - // Try v0.1 first. - _, _ = c.loadCodec(BinaryV01) - encoding = c.v01.inspectEncoding(ctx, msg) - if encoding != Unknown { - return encoding - } - - // We do not understand the message encoding. - return Unknown -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_structured.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_structured.go deleted file mode 100644 index d67d7186ee..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_structured.go +++ /dev/null @@ -1,44 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" -) - -// CodecStructured represents an structured http transport codec for all versions. -// Intended to be used as a base class. 
-type CodecStructured struct { - DefaultEncoding Encoding -} - -func (v CodecStructured) encodeStructured(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - header := http.Header{} - header.Set("Content-Type", cloudevents.ApplicationCloudEventsJSON) - - body, err := json.Marshal(e) - if err != nil { - return nil, err - } - - msg := &Message{ - Header: header, - Body: body, - } - - return msg, nil -} - -func (v CodecStructured) decodeStructured(ctx context.Context, version string, msg transport.Message) (*cloudevents.Event, error) { - m, ok := msg.(*Message) - if !ok { - return nil, fmt.Errorf("failed to convert transport.Message to http.Message") - } - event := cloudevents.New(version) - err := json.Unmarshal(m.Body, &event) - return &event, err -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go deleted file mode 100644 index 435ba93fb5..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v01.go +++ /dev/null @@ -1,232 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/textproto" - "strings" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" -) - -// CodecV01 represents a http transport codec that uses CloudEvents spec v0.1 -type CodecV01 struct { - CodecStructured - - DefaultEncoding Encoding -} - -// Adheres to Codec -var _ transport.Codec = (*CodecV01)(nil) - -// Encode implements Codec.Encode -func (v CodecV01) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - encoding := v.DefaultEncoding - strEnc := cecontext.EncodingFrom(ctx) - if strEnc != "" { - switch strEnc { - case Binary: - encoding = BinaryV01 - case Structured: - encoding = StructuredV01 - } - } - - _, r := observability.NewReporter(context.Background(), CodecObserved{o: reportEncode, c: encoding.Codec()}) - m, err := v.obsEncode(ctx, e, encoding) - if err != nil { - r.Error() - } else { - r.OK() - } - return m, err -} - -func (v CodecV01) obsEncode(ctx context.Context, e cloudevents.Event, encoding Encoding) (transport.Message, error) { - switch encoding { - case Default: - fallthrough - case BinaryV01: - return v.encodeBinary(ctx, e) - case StructuredV01: - return v.encodeStructured(ctx, e) - default: - return nil, fmt.Errorf("unknown encoding: %d", encoding) - } -} - -// Decode implements Codec.Decode -func (v CodecV01) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - _, r := observability.NewReporter(ctx, CodecObserved{o: reportDecode, c: v.inspectEncoding(ctx, msg).Codec()}) // TODO: inspectEncoding is not free. 
- e, err := v.obsDecode(ctx, msg) - if err != nil { - r.Error() - } else { - r.OK() - } - return e, err -} - -func (v CodecV01) obsDecode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - switch v.inspectEncoding(ctx, msg) { - case BinaryV01: - return v.decodeBinary(ctx, msg) - case StructuredV01: - return v.decodeStructured(ctx, cloudevents.CloudEventsVersionV01, msg) - default: - return nil, transport.NewErrMessageEncodingUnknown("v01", TransportName) - } -} - -func (v CodecV01) encodeBinary(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - header, err := v.toHeaders(e.Context.AsV01()) - if err != nil { - return nil, err - } - - body, err := e.DataBytes() - if err != nil { - panic("encode") - } - - msg := &Message{ - Header: header, - Body: body, - } - - return msg, nil -} - -func (v CodecV01) toHeaders(ec *cloudevents.EventContextV01) (http.Header, error) { - // Preserve case in v0.1, even though HTTP headers are case-insensitive. - h := http.Header{} - h["CE-CloudEventsVersion"] = []string{ec.CloudEventsVersion} - h["CE-EventID"] = []string{ec.EventID} - h["CE-EventType"] = []string{ec.EventType} - h["CE-Source"] = []string{ec.Source.String()} - if ec.EventTime != nil && !ec.EventTime.IsZero() { - h["CE-EventTime"] = []string{ec.EventTime.String()} - } - if ec.EventTypeVersion != nil { - h["CE-EventTypeVersion"] = []string{*ec.EventTypeVersion} - } - if ec.SchemaURL != nil { - h["CE-DataSchema"] = []string{ec.SchemaURL.String()} - } - if ec.ContentType != nil && *ec.ContentType != "" { - h.Set("Content-Type", *ec.ContentType) - } - - // Regarding Extensions, v0.1 Spec says the following: - // * Each map entry name MUST be prefixed with "CE-X-" - // * Each map entry name's first character MUST be capitalized - for k, v := range ec.Extensions { - encoded, err := json.Marshal(v) - if err != nil { - return nil, err - } - h["CE-X-"+strings.Title(k)] = []string{string(encoded)} - } - return h, nil -} - -func (v CodecV01) decodeBinary(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - m, ok := msg.(*Message) - if !ok { - return nil, fmt.Errorf("failed to convert transport.Message to http.Message") - } - ca, err := v.fromHeaders(m.Header) - if err != nil { - return nil, err - } - var body interface{} - if len(m.Body) > 0 { - body = m.Body - } - return &cloudevents.Event{ - Context: &ca, - Data: body, - DataEncoded: body != nil, - }, nil -} - -func (v CodecV01) fromHeaders(h http.Header) (cloudevents.EventContextV01, error) { - // Normalize headers. 
- for k, v := range h { - ck := textproto.CanonicalMIMEHeaderKey(k) - if k != ck { - h[ck] = v - } - } - - ec := cloudevents.EventContextV01{} - ec.CloudEventsVersion = h.Get("CE-CloudEventsVersion") - h.Del("CE-CloudEventsVersion") - ec.EventID = h.Get("CE-EventID") - h.Del("CE-EventID") - ec.EventType = h.Get("CE-EventType") - h.Del("CE-EventType") - source := types.ParseURLRef(h.Get("CE-Source")) - h.Del("CE-Source") - if source != nil { - ec.Source = *source - } - var err error - ec.EventTime, err = types.ParseTimestamp(h.Get("CE-EventTime")) - if err != nil { - return ec, err - } - h.Del("CE-EventTime") - etv := h.Get("CE-EventTypeVersion") - h.Del("CE-EventTypeVersion") - if etv != "" { - ec.EventTypeVersion = &etv - } - ec.SchemaURL = types.ParseURLRef(h.Get("CE-DataSchema")) - h.Del("CE-DataSchema") - et := h.Get("Content-Type") - if et != "" { - ec.ContentType = &et - } - - extensions := make(map[string]interface{}) - for k, v := range h { - if len(k) > len("CE-X-") && strings.EqualFold(k[:len("CE-X-")], "CE-X-") { - key := k[len("CE-X-"):] - var tmp interface{} - if err := json.Unmarshal([]byte(v[0]), &tmp); err == nil { - extensions[key] = tmp - } else { - // If we can't unmarshal the data, treat it as a string. - extensions[key] = v[0] - } - h.Del(k) - } - } - if len(extensions) > 0 { - ec.Extensions = extensions - } - return ec, nil -} - -func (v CodecV01) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { - version := msg.CloudEventsVersion() - if version != cloudevents.CloudEventsVersionV01 { - return Unknown - } - m, ok := msg.(*Message) - if !ok { - return Unknown - } - contentType := m.Header.Get("Content-Type") - if contentType == cloudevents.ApplicationCloudEventsJSON { - return StructuredV01 - } - return BinaryV01 -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go deleted file mode 100644 index aeb67c0e8c..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v02.go +++ /dev/null @@ -1,261 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/textproto" - "strings" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" -) - -// CodecV02 represents a http transport codec that uses CloudEvents spec v0.2 -type CodecV02 struct { - CodecStructured - - DefaultEncoding Encoding -} - -// Adheres to Codec -var _ transport.Codec = (*CodecV02)(nil) - -// Encode implements Codec.Encode -func (v CodecV02) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - encoding := v.DefaultEncoding - strEnc := cecontext.EncodingFrom(ctx) - if strEnc != "" { - switch strEnc { - case Binary: - encoding = BinaryV02 - case Structured: - encoding = StructuredV02 - } - } - - _, r := observability.NewReporter(ctx, CodecObserved{o: reportEncode, c: encoding.Codec()}) - m, err := v.obsEncode(ctx, e, encoding) - if err != nil { - r.Error() - } else { - r.OK() - } - return m, err -} - -func (v CodecV02) obsEncode(ctx context.Context, e cloudevents.Event, encoding Encoding) (transport.Message, error) { - switch encoding { - case Default: - fallthrough - 
case BinaryV02: - return v.encodeBinary(ctx, e) - case StructuredV02: - return v.encodeStructured(ctx, e) - default: - return nil, fmt.Errorf("unknown encoding: %d", encoding) - } -} - -// Decode implements Codec.Decode -func (v CodecV02) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - _, r := observability.NewReporter(ctx, CodecObserved{o: reportDecode, c: v.inspectEncoding(ctx, msg).Codec()}) // TODO: inspectEncoding is not free. - e, err := v.obsDecode(ctx, msg) - if err != nil { - r.Error() - } else { - r.OK() - } - return e, err -} - -func (v CodecV02) obsDecode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - switch v.inspectEncoding(ctx, msg) { - case BinaryV02: - return v.decodeBinary(ctx, msg) - case StructuredV02: - return v.decodeStructured(ctx, cloudevents.CloudEventsVersionV02, msg) - default: - return nil, transport.NewErrMessageEncodingUnknown("v02", TransportName) - } -} - -func (v CodecV02) encodeBinary(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - header, err := v.toHeaders(e.Context.AsV02()) - if err != nil { - return nil, err - } - body, err := e.DataBytes() - if err != nil { - return nil, err - } - - msg := &Message{ - Header: header, - Body: body, - } - - return msg, nil -} - -func (v CodecV02) toHeaders(ec *cloudevents.EventContextV02) (http.Header, error) { - h := http.Header{} - h.Set("ce-specversion", ec.SpecVersion) - h.Set("ce-type", ec.Type) - h.Set("ce-source", ec.Source.String()) - h.Set("ce-id", ec.ID) - if ec.Time != nil && !ec.Time.IsZero() { - h.Set("ce-time", ec.Time.String()) - } - if ec.SchemaURL != nil { - h.Set("ce-schemaurl", ec.SchemaURL.String()) - } - if ec.ContentType != nil && *ec.ContentType != "" { - h.Set("Content-Type", *ec.ContentType) - } - for k, v := range ec.Extensions { - // Per spec, map-valued extensions are converted to a list of headers as: - // CE-attrib-key - if mapVal, ok := v.(map[string]interface{}); ok { - for subkey, subval := range mapVal { - encoded, err := json.Marshal(subval) - if err != nil { - return nil, err - } - h.Set("ce-"+k+"-"+subkey, string(encoded)) - } - continue - } - encoded, err := json.Marshal(v) - if err != nil { - return nil, err - } - h.Set("ce-"+k, string(encoded)) - } - - return h, nil -} - -func (v CodecV02) decodeBinary(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - m, ok := msg.(*Message) - if !ok { - return nil, fmt.Errorf("failed to convert transport.Message to http.Message") - } - ca, err := v.fromHeaders(m.Header) - if err != nil { - return nil, err - } - var body interface{} - if len(m.Body) > 0 { - body = m.Body - } - return &cloudevents.Event{ - Context: &ca, - Data: body, - DataEncoded: body != nil, - }, nil -} - -func (v CodecV02) fromHeaders(h http.Header) (cloudevents.EventContextV02, error) { - // Normalize headers. 
- for k, v := range h { - ck := textproto.CanonicalMIMEHeaderKey(k) - if k != ck { - delete(h, k) - h[ck] = v - } - } - - ec := cloudevents.EventContextV02{} - - ec.SpecVersion = h.Get("ce-specversion") - h.Del("ce-specversion") - - ec.ID = h.Get("ce-id") - h.Del("ce-id") - - ec.Type = h.Get("ce-type") - h.Del("ce-type") - - source := types.ParseURLRef(h.Get("ce-source")) - if source != nil { - ec.Source = *source - } - h.Del("ce-source") - - var err error - ec.Time, err = types.ParseTimestamp(h.Get("ce-time")) - if err != nil { - return ec, err - } - h.Del("ce-time") - - ec.SchemaURL = types.ParseURLRef(h.Get("ce-schemaurl")) - h.Del("ce-schemaurl") - - contentType := h.Get("Content-Type") - if contentType != "" { - ec.ContentType = &contentType - } - h.Del("Content-Type") - - // At this point, we have deleted all the known headers. - // Everything left is assumed to be an extension. - - extensions := make(map[string]interface{}) - for k, v := range h { - if len(k) > len("ce-") && strings.EqualFold(k[:len("ce-")], "ce-") { - ak := strings.ToLower(k[len("ce-"):]) - if i := strings.Index(ak, "-"); i > 0 { - // attrib-key - attrib := ak[:i] - key := ak[(i + 1):] - if xv, ok := extensions[attrib]; ok { - if m, ok := xv.(map[string]interface{}); ok { - m[key] = v - continue - } - // TODO: revisit how we want to bubble errors up. - return ec, fmt.Errorf("failed to process map type extension") - } else { - m := make(map[string]interface{}) - m[key] = v - extensions[attrib] = m - } - } else { - // key - var tmp interface{} - if err := json.Unmarshal([]byte(v[0]), &tmp); err == nil { - extensions[ak] = tmp - } else { - // If we can't unmarshal the data, treat it as a string. - extensions[ak] = v[0] - } - } - } - } - if len(extensions) > 0 { - ec.Extensions = extensions - } - return ec, nil -} - -func (v CodecV02) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { - version := msg.CloudEventsVersion() - if version != cloudevents.CloudEventsVersionV02 { - return Unknown - } - m, ok := msg.(*Message) - if !ok { - return Unknown - } - contentType := m.Header.Get("Content-Type") - if contentType == cloudevents.ApplicationCloudEventsJSON { - return StructuredV02 - } - return BinaryV02 -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go deleted file mode 100644 index b2b3c87ee9..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v03.go +++ /dev/null @@ -1,302 +0,0 @@ -package http - -import ( - "context" - "encoding/json" - "fmt" - "net/http" - "net/textproto" - "strings" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" -) - -// CodecV03 represents a http transport codec that uses CloudEvents spec v0.3 -type CodecV03 struct { - CodecStructured - - DefaultEncoding Encoding -} - -// Adheres to Codec -var _ transport.Codec = (*CodecV03)(nil) - -// Encode implements Codec.Encode -func (v CodecV03) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - encoding := v.DefaultEncoding - strEnc := cecontext.EncodingFrom(ctx) - if strEnc != "" { - switch strEnc { - case Binary: - encoding = 
BinaryV03 - case Structured: - encoding = StructuredV03 - } - } - - _, r := observability.NewReporter(ctx, CodecObserved{o: reportEncode, c: encoding.Codec()}) - m, err := v.obsEncode(ctx, e, encoding) - if err != nil { - r.Error() - } else { - r.OK() - } - return m, err -} - -func (v CodecV03) obsEncode(ctx context.Context, e cloudevents.Event, encoding Encoding) (transport.Message, error) { - switch encoding { - case Default: - fallthrough - case BinaryV03: - return v.encodeBinary(ctx, e) - case StructuredV03: - return v.encodeStructured(ctx, e) - case BatchedV03: - return nil, fmt.Errorf("not implemented") - default: - return nil, fmt.Errorf("unknown encoding: %d", encoding) - } -} - -// Decode implements Codec.Decode -func (v CodecV03) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - _, r := observability.NewReporter(ctx, CodecObserved{o: reportDecode, c: v.inspectEncoding(ctx, msg).Codec()}) // TODO: inspectEncoding is not free. - e, err := v.obsDecode(ctx, msg) - if err != nil { - r.Error() - } else { - r.OK() - } - return e, err -} - -func (v CodecV03) obsDecode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - switch v.inspectEncoding(ctx, msg) { - case BinaryV03: - return v.decodeBinary(ctx, msg) - case StructuredV03: - return v.decodeStructured(ctx, cloudevents.CloudEventsVersionV03, msg) - case BatchedV03: - return nil, fmt.Errorf("not implemented") - default: - return nil, transport.NewErrMessageEncodingUnknown("v03", TransportName) - } -} - -func (v CodecV03) encodeBinary(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - header, err := v.toHeaders(e.Context.AsV03()) - if err != nil { - return nil, err - } - - body, err := e.DataBytes() - if err != nil { - return nil, err - } - - msg := &Message{ - Header: header, - Body: body, - } - - return msg, nil -} - -func (v CodecV03) toHeaders(ec *cloudevents.EventContextV03) (http.Header, error) { - h := http.Header{} - h.Set("ce-specversion", ec.SpecVersion) - h.Set("ce-type", ec.Type) - h.Set("ce-source", ec.Source.String()) - if ec.Subject != nil { - h.Set("ce-subject", *ec.Subject) - } - h.Set("ce-id", ec.ID) - if ec.Time != nil && !ec.Time.IsZero() { - h.Set("ce-time", ec.Time.String()) - } - if ec.SchemaURL != nil { - h.Set("ce-schemaurl", ec.SchemaURL.String()) - } - if ec.DataContentType != nil && *ec.DataContentType != "" { - h.Set("Content-Type", *ec.DataContentType) - } - if ec.DataContentEncoding != nil { - h.Set("ce-datacontentencoding", *ec.DataContentEncoding) - } - - for k, v := range ec.Extensions { - k = strings.ToLower(k) - // Per spec, map-valued extensions are converted to a list of headers as: - // CE-attrib-key - switch v.(type) { - case string: - h.Set("ce-"+k, v.(string)) - - case map[string]interface{}: - mapVal := v.(map[string]interface{}) - - for subkey, subval := range mapVal { - if subvalstr, ok := v.(string); ok { - h.Set("ce-"+k+"-"+subkey, subvalstr) - continue - } - - encoded, err := json.Marshal(subval) - if err != nil { - return nil, err - } - h.Set("ce-"+k+"-"+subkey, string(encoded)) - } - - default: - encoded, err := json.Marshal(v) - if err != nil { - return nil, err - } - h.Set("ce-"+k, string(encoded)) - } - } - - return h, nil -} - -func (v CodecV03) decodeBinary(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - m, ok := msg.(*Message) - if !ok { - return nil, fmt.Errorf("failed to convert transport.Message to http.Message") - } - ca, err := v.fromHeaders(m.Header) - if err != nil { - 
return nil, err - } - var body interface{} - if len(m.Body) > 0 { - body = m.Body - } - return &cloudevents.Event{ - Context: &ca, - Data: body, - DataEncoded: body != nil, - }, nil -} - -func (v CodecV03) fromHeaders(h http.Header) (cloudevents.EventContextV03, error) { - // Normalize headers. - for k, v := range h { - ck := textproto.CanonicalMIMEHeaderKey(k) - if k != ck { - delete(h, k) - h[ck] = v - } - } - - ec := cloudevents.EventContextV03{} - - ec.SpecVersion = h.Get("ce-specversion") - h.Del("ce-specversion") - - ec.ID = h.Get("ce-id") - h.Del("ce-id") - - ec.Type = h.Get("ce-type") - h.Del("ce-type") - - source := types.ParseURLRef(h.Get("ce-source")) - if source != nil { - ec.Source = *source - } - h.Del("ce-source") - - subject := h.Get("ce-subject") - if subject != "" { - ec.Subject = &subject - } - h.Del("ce-subject") - - var err error - ec.Time, err = types.ParseTimestamp(h.Get("ce-time")) - if err != nil { - return ec, err - } - h.Del("ce-time") - - ec.SchemaURL = types.ParseURLRef(h.Get("ce-schemaurl")) - h.Del("ce-schemaurl") - - contentType := h.Get("Content-Type") - if contentType != "" { - ec.DataContentType = &contentType - } - h.Del("Content-Type") - - dataContentEncoding := h.Get("ce-datacontentencoding") - if dataContentEncoding != "" { - ec.DataContentEncoding = &dataContentEncoding - } - h.Del("ce-datacontentencoding") - - // At this point, we have deleted all the known headers. - // Everything left is assumed to be an extension. - - extensions := make(map[string]interface{}) - for k, v := range h { - k = strings.ToLower(k) - if len(k) > len("ce-") && strings.EqualFold(k[:len("ce-")], "ce-") { - ak := strings.ToLower(k[len("ce-"):]) - if i := strings.Index(ak, "-"); i > 0 { - // attrib-key - attrib := ak[:i] - key := ak[(i + 1):] - if xv, ok := extensions[attrib]; ok { - if m, ok := xv.(map[string]interface{}); ok { - m[key] = v - continue - } - // TODO: revisit how we want to bubble errors up. - return ec, fmt.Errorf("failed to process map type extension") - } else { - m := make(map[string]interface{}) - m[key] = v - extensions[attrib] = m - } - } else { - // key - var tmp interface{} - if err := json.Unmarshal([]byte(v[0]), &tmp); err == nil { - extensions[ak] = tmp - } else { - // If we can't unmarshal the data, treat it as a string. 
- extensions[ak] = v[0] - } - } - } - } - if len(extensions) > 0 { - ec.Extensions = extensions - } - return ec, nil -} - -func (v CodecV03) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { - version := msg.CloudEventsVersion() - if version != cloudevents.CloudEventsVersionV03 { - return Unknown - } - m, ok := msg.(*Message) - if !ok { - return Unknown - } - contentType := m.Header.Get("Content-Type") - if contentType == cloudevents.ApplicationCloudEventsJSON { - return StructuredV03 - } - if contentType == cloudevents.ApplicationCloudEventsBatchJSON { - return BatchedV03 - } - return BinaryV03 -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v1.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v1.go deleted file mode 100644 index 4ebe7422b0..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/codec_v1.go +++ /dev/null @@ -1,245 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - "net/textproto" - "strings" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" -) - -// CodecV1 represents a http transport codec that uses CloudEvents spec v1.0 -type CodecV1 struct { - CodecStructured - - DefaultEncoding Encoding -} - -// Adheres to Codec -var _ transport.Codec = (*CodecV1)(nil) - -// Encode implements Codec.Encode -func (v CodecV1) Encode(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - encoding := v.DefaultEncoding - strEnc := cecontext.EncodingFrom(ctx) - if strEnc != "" { - switch strEnc { - case Binary: - encoding = BinaryV1 - case Structured: - encoding = StructuredV1 - } - } - - _, r := observability.NewReporter(ctx, CodecObserved{o: reportEncode, c: encoding.Codec()}) - m, err := v.obsEncode(ctx, e, encoding) - if err != nil { - r.Error() - } else { - r.OK() - } - return m, err -} - -func (v CodecV1) obsEncode(ctx context.Context, e cloudevents.Event, encoding Encoding) (transport.Message, error) { - switch encoding { - case Default: - fallthrough - case BinaryV1: - return v.encodeBinary(ctx, e) - case StructuredV1: - return v.encodeStructured(ctx, e) - case BatchedV1: - return nil, fmt.Errorf("not implemented") - default: - return nil, fmt.Errorf("unknown encoding: %d", encoding) - } -} - -// Decode implements Codec.Decode -func (v CodecV1) Decode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - _, r := observability.NewReporter(ctx, CodecObserved{o: reportDecode, c: v.inspectEncoding(ctx, msg).Codec()}) // TODO: inspectEncoding is not free. 
- e, err := v.obsDecode(ctx, msg) - if err != nil { - r.Error() - } else { - r.OK() - } - return e, err -} - -func (v CodecV1) obsDecode(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - switch v.inspectEncoding(ctx, msg) { - case BinaryV1: - return v.decodeBinary(ctx, msg) - case StructuredV1: - return v.decodeStructured(ctx, cloudevents.CloudEventsVersionV1, msg) - case BatchedV1: - return nil, fmt.Errorf("not implemented") - default: - return nil, transport.NewErrMessageEncodingUnknown("V1", TransportName) - } -} - -func (v CodecV1) encodeBinary(ctx context.Context, e cloudevents.Event) (transport.Message, error) { - header, err := v.toHeaders(e.Context.AsV1()) - if err != nil { - return nil, err - } - - body, err := e.DataBytes() - if err != nil { - return nil, err - } - - msg := &Message{ - Header: header, - Body: body, - } - - return msg, nil -} - -func (v CodecV1) toHeaders(ec *cloudevents.EventContextV1) (http.Header, error) { - h := http.Header{} - h.Set("ce-specversion", ec.SpecVersion) - h.Set("ce-type", ec.Type) - h.Set("ce-source", ec.Source.String()) - if ec.Subject != nil { - h.Set("ce-subject", *ec.Subject) - } - h.Set("ce-id", ec.ID) - if ec.Time != nil && !ec.Time.IsZero() { - h.Set("ce-time", ec.Time.String()) - } - if ec.DataSchema != nil { - h.Set("ce-dataschema", ec.DataSchema.String()) - } - if ec.DataContentType != nil && *ec.DataContentType != "" { - h.Set("Content-Type", *ec.DataContentType) - } - - for k, v := range ec.Extensions { - k = strings.ToLower(k) - // Per spec, extensions are strings and converted to a list of headers as: - // ce-key: value - cstr, err := types.Format(v) - if err != nil { - return h, err - } - h.Set("ce-"+k, cstr) - } - - return h, nil -} - -func (v CodecV1) decodeBinary(ctx context.Context, msg transport.Message) (*cloudevents.Event, error) { - m, ok := msg.(*Message) - if !ok { - return nil, fmt.Errorf("failed to convert transport.Message to http.Message") - } - ca, err := v.fromHeaders(m.Header) - if err != nil { - return nil, err - } - var body interface{} - if len(m.Body) > 0 { - body = m.Body - } - return &cloudevents.Event{ - Context: &ca, - Data: body, - DataEncoded: body != nil, - }, nil -} - -func (v CodecV1) fromHeaders(h http.Header) (cloudevents.EventContextV1, error) { - // Normalize headers. - for k, v := range h { - ck := textproto.CanonicalMIMEHeaderKey(k) - if k != ck { - delete(h, k) - h[ck] = v - } - } - - ec := cloudevents.EventContextV1{} - - ec.SpecVersion = h.Get("ce-specversion") - h.Del("ce-specversion") - - ec.ID = h.Get("ce-id") - h.Del("ce-id") - - ec.Type = h.Get("ce-type") - h.Del("ce-type") - - source := types.ParseURIRef(h.Get("ce-source")) - if source != nil { - ec.Source = *source - } - h.Del("ce-source") - - subject := h.Get("ce-subject") - if subject != "" { - ec.Subject = &subject - } - h.Del("ce-subject") - - var err error - ec.Time, err = types.ParseTimestamp(h.Get("ce-time")) - if err != nil { - return ec, err - } - h.Del("ce-time") - - ec.DataSchema = types.ParseURI(h.Get("ce-dataschema")) - h.Del("ce-dataschema") - - contentType := h.Get("Content-Type") - if contentType != "" { - ec.DataContentType = &contentType - } - h.Del("Content-Type") - - // At this point, we have deleted all the known headers. - // Everything left is assumed to be an extension. 
- - extensions := make(map[string]interface{}) - for k := range h { - k = strings.ToLower(k) - if len(k) > len("ce-") && strings.EqualFold(k[:len("ce-")], "ce-") { - ak := strings.ToLower(k[len("ce-"):]) - extensions[ak] = h.Get(k) - } - } - if len(extensions) > 0 { - ec.Extensions = extensions - } - return ec, nil -} - -func (v CodecV1) inspectEncoding(ctx context.Context, msg transport.Message) Encoding { - version := msg.CloudEventsVersion() - if version != cloudevents.CloudEventsVersionV1 { - return Unknown - } - m, ok := msg.(*Message) - if !ok { - return Unknown - } - contentType := m.Header.Get("Content-Type") - if contentType == cloudevents.ApplicationCloudEventsJSON { - return StructuredV1 - } - if contentType == cloudevents.ApplicationCloudEventsBatchJSON { - return BatchedV1 - } - return BinaryV1 -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/context.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/context.go deleted file mode 100644 index cf8b8510d7..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/context.go +++ /dev/null @@ -1,207 +0,0 @@ -package http - -import ( - "context" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" -) - -// TransportContext allows a Receiver to understand the context of a request. -type TransportContext struct { - URI string - Host string - Method string - Header http.Header - StatusCode int - - // IgnoreHeaderPrefixes controls what comes back from AttendToHeaders. - // AttendToHeaders controls what is output for .String() - IgnoreHeaderPrefixes []string -} - -// NewTransportContext creates a new TransportContext from a http.Request. -func NewTransportContext(req *http.Request) TransportContext { - var tx *TransportContext - if req != nil { - tx = &TransportContext{ - URI: req.RequestURI, - Host: req.Host, - Method: req.Method, - Header: req.Header, - } - } else { - tx = &TransportContext{} - } - tx.AddIgnoreHeaderPrefix("accept-encoding", "user-agent", "connection", "content-type") - return *tx -} - -// NewTransportContextFromResponse creates a new TransportContext from a http.Response. -// If `res` is nil, it returns a context with a http.StatusInternalServerError status code. -func NewTransportContextFromResponse(res *http.Response) TransportContext { - var tx *TransportContext - if res != nil { - tx = &TransportContext{ - Header: res.Header, - StatusCode: res.StatusCode, - } - } else { - tx = &TransportContext{StatusCode: http.StatusInternalServerError} - } - tx.AddIgnoreHeaderPrefix("accept-encoding", "user-agent", "connection", "content-type") - return *tx -} - -// TransportResponseContext allows a Receiver response with http transport specific fields. -type TransportResponseContext struct { - // Header will be merged with the response headers. - Header http.Header -} - -// AttendToHeaders returns the list of headers that exist in the TransportContext that are not currently in -// tx.IgnoreHeaderPrefix. 
-func (tx TransportContext) AttendToHeaders() []string { - a := []string(nil) - if tx.Header != nil && len(tx.Header) > 0 { - for k := range tx.Header { - if tx.shouldIgnoreHeader(k) { - continue - } - a = append(a, k) - } - } - return a -} - -func (tx TransportContext) shouldIgnoreHeader(h string) bool { - for _, v := range tx.IgnoreHeaderPrefixes { - if strings.HasPrefix(strings.ToLower(h), strings.ToLower(v)) { - return true - } - } - return false -} - -// String generates a pretty-printed version of the resource as a string. -func (tx TransportContext) String() string { - b := strings.Builder{} - - b.WriteString("Transport Context,\n") - - empty := b.Len() - - if tx.URI != "" { - b.WriteString(" URI: " + tx.URI + "\n") - } - if tx.Host != "" { - b.WriteString(" Host: " + tx.Host + "\n") - } - - if tx.Method != "" { - b.WriteString(" Method: " + tx.Method + "\n") - } - - if tx.StatusCode != 0 { - b.WriteString(" StatusCode: " + strconv.Itoa(tx.StatusCode) + "\n") - } - - if tx.Header != nil && len(tx.Header) > 0 { - b.WriteString(" Header:\n") - for _, k := range tx.AttendToHeaders() { - b.WriteString(fmt.Sprintf(" %s: %s\n", k, tx.Header.Get(k))) - } - } - - if b.Len() == empty { - b.WriteString(" nil\n") - } - - return b.String() -} - -// AddIgnoreHeaderPrefix controls what header key is to be attended to and/or printed. -func (tx *TransportContext) AddIgnoreHeaderPrefix(prefix ...string) { - if tx.IgnoreHeaderPrefixes == nil { - tx.IgnoreHeaderPrefixes = []string(nil) - } - tx.IgnoreHeaderPrefixes = append(tx.IgnoreHeaderPrefixes, prefix...) -} - -// Opaque key type used to store TransportContext -type transportContextKeyType struct{} - -var transportContextKey = transportContextKeyType{} - -// WithTransportContext return a context with the given TransportContext into the provided context object. -func WithTransportContext(ctx context.Context, tcxt TransportContext) context.Context { - return context.WithValue(ctx, transportContextKey, tcxt) -} - -// TransportContextFrom pulls a TransportContext out of a context. Always -// returns a non-nil object. -func TransportContextFrom(ctx context.Context) TransportContext { - tctx := ctx.Value(transportContextKey) - if tctx != nil { - if tx, ok := tctx.(TransportContext); ok { - return tx - } - if tx, ok := tctx.(*TransportContext); ok { - return *tx - } - } - return TransportContext{} -} - -// Opaque key type used to store Headers -type headerKeyType struct{} - -var headerKey = headerKeyType{} - -// ContextWithHeader returns a context with a header added to the given context. -// Can be called multiple times to set multiple header key/value pairs. -func ContextWithHeader(ctx context.Context, key, value string) context.Context { - header := HeaderFrom(ctx) - header.Add(key, value) - return context.WithValue(ctx, headerKey, header) -} - -// HeaderFrom extracts the header object in the given context. Always returns a non-nil Header. -func HeaderFrom(ctx context.Context) http.Header { - ch := http.Header{} - header := ctx.Value(headerKey) - if header != nil { - if h, ok := header.(http.Header); ok { - copyHeaders(h, ch) - } - } - return ch -} - -// Opaque key type used to store long poll target. -type longPollTargetKeyType struct{} - -var longPollTargetKey = longPollTargetKeyType{} - -// WithLongPollTarget returns a new context with the given long poll target. -// `target` should be a full URL and will be injected into the long polling -// http request within StartReceiver. 
-func ContextWithLongPollTarget(ctx context.Context, target string) context.Context { - return context.WithValue(ctx, longPollTargetKey, target) -} - -// LongPollTargetFrom looks in the given context and returns `target` as a -// parsed url if found and valid, otherwise nil. -func LongPollTargetFrom(ctx context.Context) *url.URL { - c := ctx.Value(longPollTargetKey) - if c != nil { - if s, ok := c.(string); ok && s != "" { - if target, err := url.Parse(s); err == nil { - return target - } - } - } - return nil -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/doc.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/doc.go deleted file mode 100644 index 1a171e46e1..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/doc.go +++ /dev/null @@ -1,4 +0,0 @@ -/* -Package http implements the CloudEvent transport implementation using HTTP. -*/ -package http diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/encoding.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/encoding.go deleted file mode 100644 index 60f3e3ea3e..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/encoding.go +++ /dev/null @@ -1,205 +0,0 @@ -package http - -import ( - "context" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" -) - -// Encoding to use for HTTP transport. -type Encoding int32 - -type EncodingSelector func(context.Context, cloudevents.Event) Encoding - -const ( - // Default - Default Encoding = iota - // BinaryV01 is Binary CloudEvents spec v0.1. - BinaryV01 - // StructuredV01 is Structured CloudEvents spec v0.1. - StructuredV01 - // BinaryV02 is Binary CloudEvents spec v0.2. - BinaryV02 - // StructuredV02 is Structured CloudEvents spec v0.2. - StructuredV02 - // BinaryV03 is Binary CloudEvents spec v0.3. - BinaryV03 - // StructuredV03 is Structured CloudEvents spec v0.3. - StructuredV03 - // BatchedV03 is Batched CloudEvents spec v0.3. - BatchedV03 - // BinaryV1 is Binary CloudEvents spec v1.0. - BinaryV1 - // StructuredV03 is Structured CloudEvents spec v1.0. - StructuredV1 - // BatchedV1 is Batched CloudEvents spec v1.0. - BatchedV1 - - // Unknown is unknown. - Unknown - - // Binary is used for Context Based Encoding Selections to use the - // DefaultBinaryEncodingSelectionStrategy - Binary = "binary" - - // Structured is used for Context Based Encoding Selections to use the - // DefaultStructuredEncodingSelectionStrategy - Structured = "structured" - - // Batched is used for Context Based Encoding Selections to use the - // DefaultStructuredEncodingSelectionStrategy - Batched = "batched" -) - -func ContextBasedEncodingSelectionStrategy(ctx context.Context, e cloudevents.Event) Encoding { - encoding := cecontext.EncodingFrom(ctx) - switch encoding { - case "", Binary: - return DefaultBinaryEncodingSelectionStrategy(ctx, e) - case Structured: - return DefaultStructuredEncodingSelectionStrategy(ctx, e) - } - return Default -} - -// DefaultBinaryEncodingSelectionStrategy implements a selection process for -// which binary encoding to use based on spec version of the event. 
-func DefaultBinaryEncodingSelectionStrategy(ctx context.Context, e cloudevents.Event) Encoding { - switch e.SpecVersion() { - case cloudevents.CloudEventsVersionV01: - return BinaryV01 - case cloudevents.CloudEventsVersionV02: - return BinaryV02 - case cloudevents.CloudEventsVersionV03: - return BinaryV03 - case cloudevents.CloudEventsVersionV1: - return BinaryV1 - } - // Unknown version, return Default. - return Default -} - -// DefaultStructuredEncodingSelectionStrategy implements a selection process -// for which structured encoding to use based on spec version of the event. -func DefaultStructuredEncodingSelectionStrategy(ctx context.Context, e cloudevents.Event) Encoding { - switch e.SpecVersion() { - case cloudevents.CloudEventsVersionV01: - return StructuredV01 - case cloudevents.CloudEventsVersionV02: - return StructuredV02 - case cloudevents.CloudEventsVersionV03: - return StructuredV03 - case cloudevents.CloudEventsVersionV1: - return StructuredV1 - } - // Unknown version, return Default. - return Default -} - -// String pretty-prints the encoding as a string. -func (e Encoding) String() string { - switch e { - case Default: - return "Default Encoding " + e.Version() - - // Binary - case BinaryV01, BinaryV02, BinaryV03, BinaryV1: - return "Binary Encoding " + e.Version() - - // Structured - case StructuredV01, StructuredV02, StructuredV03, StructuredV1: - return "Structured Encoding " + e.Version() - - // Batched - case BatchedV03, BatchedV1: - return "Batched Encoding " + e.Version() - - default: - return "Unknown Encoding" - } -} - -// Version pretty-prints the encoding version as a string. -func (e Encoding) Version() string { - switch e { - case Default: - return "Default" - - // Version 0.1 - case BinaryV01, StructuredV01: - return "v0.1" - - // Version 0.2 - case BinaryV02, StructuredV02: - return "v0.2" - - // Version 0.3 - case BinaryV03, StructuredV03, BatchedV03: - return "v0.3" - - // Version 1.0 - case BinaryV1, StructuredV1, BatchedV1: - return "v1.0" - - // Unknown - default: - return "Unknown" - } -} - -// Codec creates a structured string to represent the the codec version. -func (e Encoding) Codec() string { - switch e { - case Default: - return "default" - - // Version 0.1 - case BinaryV01: - return "binary/v0.1" - case StructuredV01: - return "structured/v0.1" - - // Version 0.2 - case BinaryV02: - return "binary/v0.2" - case StructuredV02: - return "structured/v0.2" - - // Version 0.3 - case BinaryV03: - return "binary/v0.3" - case StructuredV03: - return "structured/v0.3" - case BatchedV03: - return "batched/v0.3" - - // Version 1.0 - case BinaryV1: - return "binary/v1.0" - case StructuredV1: - return "structured/v1.0" - case BatchedV1: - return "batched/v1.0" - - // Unknown - default: - return "unknown" - } -} - -// Name creates a string to represent the the codec name. 
-func (e Encoding) Name() string { - switch e { - case Default: - return Binary - case BinaryV01, BinaryV02, BinaryV03, BinaryV1: - return Binary - case StructuredV01, StructuredV02, StructuredV03, StructuredV1: - return Structured - case BatchedV03, BatchedV1: - return Batched - default: - return Binary - } -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/message.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/message.go deleted file mode 100644 index a6cdbecb1c..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/message.go +++ /dev/null @@ -1,148 +0,0 @@ -package http - -import ( - "bytes" - "encoding/json" - - "io" - "io/ioutil" - "net/http" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" -) - -// type check that this transport message impl matches the contract -var _ transport.Message = (*Message)(nil) - -// Message is an http transport message. -type Message struct { - Header http.Header - Body []byte -} - -// Response is an http transport response. -type Response struct { - StatusCode int - Message -} - -// CloudEventsVersion inspects a message and tries to discover and return the -// CloudEvents spec version. -func (m Message) CloudEventsVersion() string { - - // TODO: the impl of this method needs to move into the codec. - - if m.Header != nil { - // Try headers first. - // v0.1, cased from the spec - // Note: don't pass literal string direct to m.Header[] so that - // go vet won't complain about non-canonical case. - name := "CE-CloudEventsVersion" - if v := m.Header[name]; len(v) == 1 { - return v[0] - } - // v0.2, canonical casing - if ver := m.Header.Get("CE-CloudEventsVersion"); ver != "" { - return ver - } - - // v0.2, cased from the spec - name = "ce-specversion" - if v := m.Header[name]; len(v) == 1 { - return v[0] - } - // v0.2, canonical casing - name = "ce-specversion" - if ver := m.Header.Get(name); ver != "" { - return ver - } - } - - // Then try the data body. - // TODO: we need to use the correct decoding based on content type. - - raw := make(map[string]json.RawMessage) - if err := json.Unmarshal(m.Body, &raw); err != nil { - return "" - } - - // v0.1 - if v, ok := raw["cloudEventsVersion"]; ok { - var version string - if err := json.Unmarshal(v, &version); err != nil { - return "" - } - return version - } - - // v0.2 - if v, ok := raw["specversion"]; ok { - var version string - if err := json.Unmarshal(v, &version); err != nil { - return "" - } - return version - } - - return "" -} - -func readAllClose(r io.ReadCloser) ([]byte, error) { - if r != nil { - defer r.Close() - return ioutil.ReadAll(r) - } - return nil, nil -} - -// NewMessage creates a new message from the Header and Body of -// an http.Request or http.Response -func NewMessage(header http.Header, body io.ReadCloser) (*Message, error) { - var m Message - err := m.Init(header, body) - return &m, err -} - -// NewResponse creates a new response from the Header and Body of -// an http.Request or http.Response -func NewResponse(header http.Header, body io.ReadCloser, statusCode int) (*Response, error) { - resp := Response{StatusCode: statusCode} - err := resp.Init(header, body) - return &resp, err -} - -// Copy copies a new Body and Header into a message, replacing any previous data. 
-func (m *Message) Init(header http.Header, body io.ReadCloser) error { - m.Header = make(http.Header, len(header)) - copyHeadersEnsure(header, &m.Header) - var err error - m.Body, err = readAllClose(body) - return err -} - -func (m *Message) copyOut(header *http.Header, body *io.ReadCloser) { - copyHeadersEnsure(m.Header, header) - *body = nil - if m.Body != nil { - copy := append([]byte(nil), m.Body...) - *body = ioutil.NopCloser(bytes.NewBuffer(copy)) - } -} - -// ToRequest updates a http.Request from a Message. -// Replaces Body, ContentLength and Method, updates Headers. -// Panic if req is nil -func (m *Message) ToRequest(req *http.Request) { - m.copyOut(&req.Header, &req.Body) - req.ContentLength = int64(len(m.Body)) - req.Method = http.MethodPost -} - -// ToResponse updates a http.Response from a Response. -// Replaces Body, updates Headers. -// Panic if resp is nil -func (m *Response) ToResponse(resp *http.Response) { - m.copyOut(&resp.Header, &resp.Body) - resp.ContentLength = int64(len(m.Body)) - resp.StatusCode = m.StatusCode -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/observability.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/observability.go deleted file mode 100644 index 1da56dc2ad..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/observability.go +++ /dev/null @@ -1,109 +0,0 @@ -package http - -import ( - "fmt" - - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -var ( - // LatencyMs measures the latency in milliseconds for the http transport - // methods for CloudEvents. - LatencyMs = stats.Float64( - "cloudevents.io/sdk-go/transport/http/latency", - "The latency in milliseconds for the http transport methods for CloudEvents.", - "ms") -) - -var ( - // LatencyView is an OpenCensus view that shows http transport method latency. - LatencyView = &view.View{ - Name: "transport/http/latency", - Measure: LatencyMs, - Description: "The distribution of latency inside of http transport for CloudEvents.", - Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), - TagKeys: observability.LatencyTags(), - } -) - -type observed int32 - -// Adheres to Observable -var _ observability.Observable = observed(0) - -const ( - reportSend observed = iota - reportReceive - reportServeHTTP - reportEncode - reportDecode -) - -// TraceName implements Observable.TraceName -func (o observed) TraceName() string { - switch o { - case reportSend: - return "transport/http/send" - case reportReceive: - return "transport/http/receive" - case reportServeHTTP: - return "transport/http/servehttp" - case reportEncode: - return "transport/http/encode" - case reportDecode: - return "transport/http/decode" - default: - return "transport/http/unknown" - } -} - -// MethodName implements Observable.MethodName -func (o observed) MethodName() string { - switch o { - case reportSend: - return "send" - case reportReceive: - return "receive" - case reportServeHTTP: - return "servehttp" - case reportEncode: - return "encode" - case reportDecode: - return "decode" - default: - return "unknown" - } -} - -// LatencyMs implements Observable.LatencyMs -func (o observed) LatencyMs() *stats.Float64Measure { - return LatencyMs -} - -// CodecObserved is a wrapper to append version to observed. 
-type CodecObserved struct { - // Method - o observed - // Codec - c string -} - -// Adheres to Observable -var _ observability.Observable = (*CodecObserved)(nil) - -// TraceName implements Observable.TraceName -func (c CodecObserved) TraceName() string { - return fmt.Sprintf("%s/%s", c.o.TraceName(), c.c) -} - -// MethodName implements Observable.MethodName -func (c CodecObserved) MethodName() string { - return fmt.Sprintf("%s/%s", c.o.MethodName(), c.c) -} - -// LatencyMs implements Observable.LatencyMs -func (c CodecObserved) LatencyMs() *stats.Float64Measure { - return c.o.LatencyMs() -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/options.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/options.go deleted file mode 100644 index 0276157fcc..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/options.go +++ /dev/null @@ -1,266 +0,0 @@ -package http - -import ( - "fmt" - "net" - nethttp "net/http" - "net/url" - "strings" - "time" -) - -// Option is the function signature required to be considered an http.Option. -type Option func(*Transport) error - -// WithTarget sets the outbound recipient of cloudevents when using an HTTP -// request. -func WithTarget(targetUrl string) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http target option can not set nil transport") - } - targetUrl = strings.TrimSpace(targetUrl) - if targetUrl != "" { - var err error - var target *url.URL - target, err = url.Parse(targetUrl) - if err != nil { - return fmt.Errorf("http target option failed to parse target url: %s", err.Error()) - } - - if t.Req == nil { - t.Req = &nethttp.Request{ - Method: nethttp.MethodPost, - } - } - t.Req.URL = target - return nil - } - return fmt.Errorf("http target option was empty string") - } -} - -// WithMethod sets the outbound recipient of cloudevents when using an HTTP -// request. -func WithMethod(method string) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http method option can not set nil transport") - } - method = strings.TrimSpace(method) - if method != "" { - if t.Req == nil { - t.Req = &nethttp.Request{} - } - t.Req.Method = method - return nil - } - return fmt.Errorf("http method option was empty string") - } -} - -// WithHeader sets an additional default outbound header for all cloudevents -// when using an HTTP request. -func WithHeader(key, value string) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http header option can not set nil transport") - } - key = strings.TrimSpace(key) - if key != "" { - if t.Req == nil { - t.Req = &nethttp.Request{} - } - if t.Req.Header == nil { - t.Req.Header = nethttp.Header{} - } - t.Req.Header.Add(key, value) - return nil - } - return fmt.Errorf("http header option was empty string") - } -} - -// WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. -func WithShutdownTimeout(timeout time.Duration) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http shutdown timeout option can not set nil transport") - } - t.ShutdownTimeout = &timeout - return nil - } -} - -// WithEncoding sets the encoding for clients with HTTP transports. 
-func WithEncoding(encoding Encoding) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http encoding option can not set nil transport") - } - t.Encoding = encoding - return nil - } -} - -// WithDefaultEncodingSelector sets the encoding selection strategy for -// default encoding selections based on Event. -func WithDefaultEncodingSelector(fn EncodingSelector) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http default encoding selector option can not set nil transport") - } - if fn != nil { - t.DefaultEncodingSelectionFn = fn - return nil - } - return fmt.Errorf("http fn for DefaultEncodingSelector was nil") - } -} - -// WithContextBasedEncoding sets the encoding selection strategy for -// default encoding selections based context and then on Event, the encoded -// event will be the given version in the encoding specified by the given -// context, or Binary if not set. -func WithContextBasedEncoding() Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http context based encoding option can not set nil transport") - } - - t.DefaultEncodingSelectionFn = ContextBasedEncodingSelectionStrategy - return nil - } -} - -// WithBinaryEncoding sets the encoding selection strategy for -// default encoding selections based on Event, the encoded event will be the -// given version in Binary form. -func WithBinaryEncoding() Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http binary encoding option can not set nil transport") - } - - t.DefaultEncodingSelectionFn = DefaultBinaryEncodingSelectionStrategy - return nil - } -} - -// WithStructuredEncoding sets the encoding selection strategy for -// default encoding selections based on Event, the encoded event will be the -// given version in Structured form. -func WithStructuredEncoding() Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http structured encoding option can not set nil transport") - } - - t.DefaultEncodingSelectionFn = DefaultStructuredEncodingSelectionStrategy - return nil - } -} - -func checkListen(t *Transport, prefix string) error { - switch { - case t.Port != nil: - return fmt.Errorf("%v port already set", prefix) - case t.listener != nil: - return fmt.Errorf("%v listener already set", prefix) - } - return nil -} - -// WithPort sets the listening port for StartReceiver. -// Only one of WithListener or WithPort is allowed. -func WithPort(port int) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http port option can not set nil transport") - } - if port < 0 || port > 65535 { - return fmt.Errorf("http port option was given an invalid port: %d", port) - } - if err := checkListen(t, "http port option"); err != nil { - return err - } - t.setPort(port) - return nil - } -} - -// WithListener sets the listener for StartReceiver. -// Only one of WithListener or WithPort is allowed. -func WithListener(l net.Listener) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http listener option can not set nil transport") - } - if err := checkListen(t, "http port option"); err != nil { - return err - } - t.listener = l - _, err := t.listen() - return err - } -} - -// WithPath sets the path to receive cloudevents on for HTTP transports. 
-func WithPath(path string) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http path option can not set nil transport") - } - path = strings.TrimSpace(path) - if len(path) == 0 { - return fmt.Errorf("http path option was given an invalid path: %q", path) - } - t.Path = path - return nil - } -} - -// Middleware is a function that takes an existing http.Handler and wraps it in middleware, -// returning the wrapped http.Handler. -type Middleware func(next nethttp.Handler) nethttp.Handler - -// WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times. -// Middleware is applied to everything before it. For example -// `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`. -func WithMiddleware(middleware Middleware) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http middleware option can not set nil transport") - } - t.middleware = append(t.middleware, middleware) - return nil - } -} - -// WithLongPollTarget sets the receivers URL to perform long polling after -// StartReceiver is called. -func WithLongPollTarget(targetUrl string) Option { - return func(t *Transport) error { - if t == nil { - return fmt.Errorf("http long poll target option can not set nil transport") - } - targetUrl = strings.TrimSpace(targetUrl) - if targetUrl != "" { - var err error - var target *url.URL - target, err = url.Parse(targetUrl) - if err != nil { - return fmt.Errorf("http long poll target option failed to parse target url: %s", err.Error()) - } - - if t.LongPollReq == nil { - t.LongPollReq = &nethttp.Request{ - Method: nethttp.MethodGet, - } - } - t.LongPollReq.URL = target - return nil - } - return fmt.Errorf("http long poll target option was empty string") - } -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go deleted file mode 100644 index 4dad3d7a55..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/http/transport.go +++ /dev/null @@ -1,681 +0,0 @@ -package http - -import ( - "context" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "strconv" - "strings" - "sync" - "time" - - "go.uber.org/zap" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" - cecontext "github.com/cloudevents/sdk-go/pkg/cloudevents/context" - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" - "github.com/cloudevents/sdk-go/pkg/cloudevents/transport" -) - -// Transport adheres to transport.Transport. -var _ transport.Transport = (*Transport)(nil) - -const ( - // DefaultShutdownTimeout defines the default timeout given to the http.Server when calling Shutdown. - DefaultShutdownTimeout = time.Minute * 1 - - // TransportName is the name of this transport. - TransportName = "HTTP" -) - -// Transport acts as both a http client and a http handler. -type Transport struct { - // The encoding used to select the codec for outbound events. - Encoding Encoding - - // DefaultEncodingSelectionFn allows for other encoding selection strategies to be injected. - DefaultEncodingSelectionFn EncodingSelector - - // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. - // If nil, DefaultShutdownTimeout is used. - ShutdownTimeout *time.Duration - - // Sending - - // Client is the http client that will be used to send requests. 
- // If nil, the Transport will create a one. - Client *http.Client - // Req is the base http request that is used for http.Do. - // Only .Method, .URL, .Close, and .Header is considered. - // If not set, Req.Method defaults to POST. - // Req.URL or context.WithTarget(url) are required for sending. - Req *http.Request - - // Receiving - - // Receiver is invoked target for incoming events. - Receiver transport.Receiver - // Converter is invoked if the incoming transport receives an undecodable - // message. - Converter transport.Converter - // Port is the port to bind the receiver to. Defaults to 8080. - Port *int - // Path is the path to bind the receiver to. Defaults to "/". - Path string - // Handler is the handler the http Server will use. Use this to reuse the - // http server. If nil, the Transport will create a one. - Handler *http.ServeMux - - // LongPollClient is the http client that will be used to long poll. - // If nil and LongPollReq is set, the Transport will create a one. - LongPollClient *http.Client - // LongPollReq is the base http request that is used for long poll. - // Only .Method, .URL, .Close, and .Header is considered. - // If not set, LongPollReq.Method defaults to GET. - // LongPollReq.URL or context.WithLongPollTarget(url) are required to long - // poll on StartReceiver. - LongPollReq *http.Request - - listener net.Listener - server *http.Server - handlerRegistered bool - codec transport.Codec - // Create Mutex - crMu sync.Mutex - // Receive Mutex - reMu sync.Mutex - - middleware []Middleware -} - -func New(opts ...Option) (*Transport, error) { - t := &Transport{ - Req: &http.Request{ - Method: http.MethodPost, - }, - } - if err := t.applyOptions(opts...); err != nil { - return nil, err - } - return t, nil -} - -func (t *Transport) applyOptions(opts ...Option) error { - for _, fn := range opts { - if err := fn(t); err != nil { - return err - } - } - return nil -} - -func (t *Transport) loadCodec(ctx context.Context) bool { - if t.codec == nil { - t.crMu.Lock() - if t.DefaultEncodingSelectionFn != nil && t.Encoding != Default { - logger := cecontext.LoggerFrom(ctx) - logger.Warn("transport has a DefaultEncodingSelectionFn set but Encoding is not Default. 
DefaultEncodingSelectionFn will be ignored.") - - t.codec = &Codec{ - Encoding: t.Encoding, - } - } else { - t.codec = &Codec{ - Encoding: t.Encoding, - DefaultEncodingSelectionFn: t.DefaultEncodingSelectionFn, - } - } - t.crMu.Unlock() - } - return true -} - -func copyHeaders(from, to http.Header) { - if from == nil || to == nil { - return - } - for header, values := range from { - for _, value := range values { - to.Add(header, value) - } - } -} - -// Ensure to is a non-nil map before copying -func copyHeadersEnsure(from http.Header, to *http.Header) { - if len(from) > 0 { - if *to == nil { - *to = http.Header{} - } - copyHeaders(from, *to) - } -} - -// Send implements Transport.Send -func (t *Transport) Send(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) { - ctx, r := observability.NewReporter(ctx, reportSend) - rctx, resp, err := t.obsSend(ctx, event) - if err != nil { - r.Error() - } else { - r.OK() - } - return rctx, resp, err -} - -func (t *Transport) obsSend(ctx context.Context, event cloudevents.Event) (context.Context, *cloudevents.Event, error) { - if t.Client == nil { - t.crMu.Lock() - t.Client = &http.Client{} - t.crMu.Unlock() - } - - req := http.Request{ - Header: HeaderFrom(ctx), - } - if t.Req != nil { - req.Method = t.Req.Method - req.URL = t.Req.URL - req.Close = t.Req.Close - req.Host = t.Req.Host - copyHeadersEnsure(t.Req.Header, &req.Header) - } - - // Override the default request with target from context. - if target := cecontext.TargetFrom(ctx); target != nil { - req.URL = target - } - - if ok := t.loadCodec(ctx); !ok { - return WithTransportContext(ctx, NewTransportContextFromResponse(nil)), nil, fmt.Errorf("unknown encoding set on transport: %d", t.Encoding) - } - - msg, err := t.codec.Encode(ctx, event) - if err != nil { - return WithTransportContext(ctx, NewTransportContextFromResponse(nil)), nil, err - } - - if m, ok := msg.(*Message); ok { - m.ToRequest(&req) - return httpDo(ctx, t.Client, &req, func(resp *http.Response, err error) (context.Context, *cloudevents.Event, error) { - rctx := WithTransportContext(ctx, NewTransportContextFromResponse(resp)) - if err != nil { - return rctx, nil, err - } - defer resp.Body.Close() - - body, _ := ioutil.ReadAll(resp.Body) - respEvent, err := t.MessageToEvent(ctx, &Message{ - Header: resp.Header, - Body: body, - }) - if err != nil { - isErr := true - handled := false - if txerr, ok := err.(*transport.ErrTransportMessageConversion); ok { - if !txerr.IsFatal() { - isErr = false - } - if txerr.Handled() { - handled = true - } - } - if isErr { - return rctx, nil, err - } - if handled { - return rctx, nil, nil - } - } - if accepted(resp) { - return rctx, respEvent, nil - } - return rctx, respEvent, fmt.Errorf("error sending cloudevent: %s", resp.Status) - }) - } - return WithTransportContext(ctx, NewTransportContextFromResponse(nil)), nil, fmt.Errorf("failed to encode Event into a Message") -} - -func (t *Transport) MessageToEvent(ctx context.Context, msg *Message) (*cloudevents.Event, error) { - logger := cecontext.LoggerFrom(ctx) - var event *cloudevents.Event - var err error - - if msg.CloudEventsVersion() != "" { - // This is likely a cloudevents encoded message, try to decode it. 
- if ok := t.loadCodec(ctx); !ok { - err = transport.NewErrTransportMessageConversion("http", fmt.Sprintf("unknown encoding set on transport: %d", t.Encoding), false, true) - logger.Error("failed to load codec", zap.Error(err)) - } else { - event, err = t.codec.Decode(ctx, msg) - } - } else { - err = transport.NewErrTransportMessageConversion("http", "cloudevents version unknown", false, false) - } - - // If codec returns and error, or could not load the correct codec, try - // with the converter if it is set. - if err != nil && t.HasConverter() { - event, err = t.Converter.Convert(ctx, msg, err) - } - - // If err is still set, it means that there was no converter, or the - // converter failed to convert. - if err != nil { - logger.Debug("failed to decode message", zap.Error(err)) - } - - // If event and error are both nil, then there is nothing to do with this event, it was handled. - if err == nil && event == nil { - logger.Debug("convert function returned (nil, nil)") - err = transport.NewErrTransportMessageConversion("http", "convert function handled request", true, false) - } - - return event, err -} - -// SetReceiver implements Transport.SetReceiver -func (t *Transport) SetReceiver(r transport.Receiver) { - t.Receiver = r -} - -// SetConverter implements Transport.SetConverter -func (t *Transport) SetConverter(c transport.Converter) { - t.Converter = c -} - -// HasConverter implements Transport.HasConverter -func (t *Transport) HasConverter() bool { - return t.Converter != nil -} - -// StartReceiver implements Transport.StartReceiver -// NOTE: This is a blocking call. -func (t *Transport) StartReceiver(ctx context.Context) error { - t.reMu.Lock() - defer t.reMu.Unlock() - - if t.LongPollReq != nil { - go func() { _ = t.longPollStart(ctx) }() - } - - if t.Handler == nil { - t.Handler = http.NewServeMux() - } - if !t.handlerRegistered { - // handler.Handle might panic if the user tries to use the same path as the sdk. - t.Handler.Handle(t.GetPath(), t) - t.handlerRegistered = true - } - - addr, err := t.listen() - if err != nil { - return err - } - - t.server = &http.Server{ - Addr: addr.String(), - Handler: attachMiddleware(t.Handler, t.middleware), - } - - // Shutdown - defer func() { - t.server.Close() - t.server = nil - }() - - errChan := make(chan error, 1) - go func() { - errChan <- t.server.Serve(t.listener) - }() - - // wait for the server to return or ctx.Done(). - select { - case <-ctx.Done(): - // Try a gracefully shutdown. - timeout := DefaultShutdownTimeout - if t.ShutdownTimeout != nil { - timeout = *t.ShutdownTimeout - } - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - err := t.server.Shutdown(ctx) - <-errChan // Wait for server goroutine to exit - return err - case err := <-errChan: - return err - } -} - -func (t *Transport) longPollStart(ctx context.Context) error { - logger := cecontext.LoggerFrom(ctx) - logger.Info("starting long poll receiver") - - if t.LongPollClient == nil { - t.crMu.Lock() - t.LongPollClient = &http.Client{} - t.crMu.Unlock() - } - req := &http.Request{ - // TODO: decide if it is ok to use HeaderFrom context here. - Header: HeaderFrom(ctx), - } - if t.LongPollReq != nil { - req.Method = t.LongPollReq.Method - req.URL = t.LongPollReq.URL - req.Close = t.LongPollReq.Close - copyHeaders(t.LongPollReq.Header, req.Header) - } - - // Override the default request with target from context. 
- if target := LongPollTargetFrom(ctx); target != nil { - req.URL = target - } - - if req.URL == nil { - return errors.New("no long poll target found") - } - - req = req.WithContext(ctx) - msgCh := make(chan Message) - defer close(msgCh) - - go func(ch chan<- Message) { - for { - if resp, err := t.LongPollClient.Do(req); err != nil { - logger.Errorw("long poll request returned error", err) - uErr := err.(*url.Error) - if uErr.Temporary() || uErr.Timeout() { - continue - } - // TODO: if the transport is throwing errors, we might want to try again. Maybe with a back-off sleep. - // But this error also might be that there was a done on the context. - } else if resp.StatusCode == http.StatusNotModified { - // Keep polling. - continue - } else if resp.StatusCode == http.StatusOK { - body, _ := ioutil.ReadAll(resp.Body) - if err := resp.Body.Close(); err != nil { - logger.Warnw("error closing long poll response body", zap.Error(err)) - } - msg := Message{ - Header: resp.Header, - Body: body, - } - msgCh <- msg - } else { - // TODO: not sure what to do with upstream errors yet. - logger.Errorw("unhandled long poll response", zap.Any("resp", resp)) - } - } - }(msgCh) - - // Attach the long poll request context to the context. - ctx = WithTransportContext(ctx, TransportContext{ - URI: req.URL.RequestURI(), - Host: req.URL.Host, - Method: req.Method, - }) - - for { - select { - case <-ctx.Done(): - return nil - case msg := <-msgCh: - logger.Debug("got a message", zap.Any("msg", msg)) - if event, err := t.MessageToEvent(ctx, &msg); err != nil { - logger.Errorw("could not convert http message to event", zap.Error(err)) - } else { - logger.Debugw("got an event", zap.Any("event", event)) - // TODO: deliver event. - if _, err := t.invokeReceiver(ctx, *event); err != nil { - logger.Errorw("could not invoke receiver event", zap.Error(err)) - } - } - } - } -} - -// attachMiddleware attaches the HTTP middleware to the specified handler. -func attachMiddleware(h http.Handler, middleware []Middleware) http.Handler { - for _, m := range middleware { - h = m(h) - } - return h -} - -type eventError struct { - ctx context.Context - event *cloudevents.Event - err error -} - -func httpDo(ctx context.Context, client *http.Client, req *http.Request, fn func(*http.Response, error) (context.Context, *cloudevents.Event, error)) (context.Context, *cloudevents.Event, error) { - // Run the HTTP request in a goroutine and pass the response to fn. - c := make(chan eventError, 1) - req = req.WithContext(ctx) - go func() { - rctx, event, err := fn(client.Do(req)) - c <- eventError{ctx: rctx, event: event, err: err} - }() - select { - case <-ctx.Done(): - return ctx, nil, ctx.Err() - case ee := <-c: - return ee.ctx, ee.event, ee.err - } -} - -// accepted is a helper method to understand if the response from the target -// accepted the CloudEvent. 
-func accepted(resp *http.Response) bool { - if resp.StatusCode >= 200 && resp.StatusCode < 300 { - return true - } - return false -} - -func (t *Transport) invokeReceiver(ctx context.Context, event cloudevents.Event) (*Response, error) { - ctx, r := observability.NewReporter(ctx, reportReceive) - resp, err := t.obsInvokeReceiver(ctx, event) - if err != nil { - r.Error() - } else { - r.OK() - } - return resp, err -} - -func (t *Transport) obsInvokeReceiver(ctx context.Context, event cloudevents.Event) (*Response, error) { - logger := cecontext.LoggerFrom(ctx) - if t.Receiver != nil { - // Note: http does not use eventResp.Reason - eventResp := cloudevents.EventResponse{} - resp := Response{} - - err := t.Receiver.Receive(ctx, event, &eventResp) - if err != nil { - logger.Warnw("got an error from receiver fn", zap.Error(err)) - resp.StatusCode = http.StatusInternalServerError - return &resp, err - } - - if eventResp.Event != nil { - if t.loadCodec(ctx) { - if m, err := t.codec.Encode(ctx, *eventResp.Event); err != nil { - logger.Errorw("failed to encode response from receiver fn", zap.Error(err)) - } else if msg, ok := m.(*Message); ok { - resp.Message = *msg - } - } else { - logger.Error("failed to load codec") - resp.StatusCode = http.StatusInternalServerError - return &resp, err - } - // Look for a transport response context - var trx *TransportResponseContext - if ptrTrx, ok := eventResp.Context.(*TransportResponseContext); ok { - // found a *TransportResponseContext, use it. - trx = ptrTrx - } else if realTrx, ok := eventResp.Context.(TransportResponseContext); ok { - // found a TransportResponseContext, make it a pointer. - trx = &realTrx - } - // If we found a TransportResponseContext, use it. - if trx != nil && trx.Header != nil && len(trx.Header) > 0 { - copyHeadersEnsure(trx.Header, &resp.Message.Header) - } - } - - if eventResp.Status != 0 { - resp.StatusCode = eventResp.Status - } else { - resp.StatusCode = http.StatusAccepted // default is 202 - Accepted - } - return &resp, err - } - return nil, nil -} - -// ServeHTTP implements http.Handler -func (t *Transport) ServeHTTP(w http.ResponseWriter, req *http.Request) { - ctx, r := observability.NewReporter(req.Context(), reportServeHTTP) - // Add the transport context to ctx. - ctx = WithTransportContext(ctx, NewTransportContext(req)) - logger := cecontext.LoggerFrom(ctx) - - body, err := ioutil.ReadAll(req.Body) - if err != nil { - logger.Errorw("failed to handle request", zap.Error(err)) - w.WriteHeader(http.StatusBadRequest) - _, _ = w.Write([]byte(`{"error":"Invalid request"}`)) - r.Error() - return - } - - event, err := t.MessageToEvent(ctx, &Message{ - Header: req.Header, - Body: body, - }) - if err != nil { - isFatal := true - handled := false - if txerr, ok := err.(*transport.ErrTransportMessageConversion); ok { - isFatal = txerr.IsFatal() - handled = txerr.Handled() - } - if isFatal { - logger.Errorw("failed to convert http message to event", zap.Error(err)) - w.WriteHeader(http.StatusBadRequest) - _, _ = w.Write([]byte(fmt.Sprintf(`{"error":%q}`, err.Error()))) - r.Error() - return - } - // if handled, do not pass to receiver. 
- if handled { - w.WriteHeader(http.StatusNoContent) - r.OK() - return - } - } - if event == nil { - logger.Error("failed to get non-nil event from MessageToEvent") - w.WriteHeader(http.StatusBadRequest) - r.Error() - return - } - - resp, err := t.invokeReceiver(ctx, *event) - if err != nil { - logger.Warnw("error returned from invokeReceiver", zap.Error(err)) - w.WriteHeader(http.StatusBadRequest) - _, _ = w.Write([]byte(fmt.Sprintf(`{"error":%q}`, err.Error()))) - r.Error() - return - } - - if resp != nil { - if t.Req != nil { - copyHeaders(t.Req.Header, w.Header()) - } - if len(resp.Message.Header) > 0 { - copyHeaders(resp.Message.Header, w.Header()) - } - - status := http.StatusAccepted - if resp.StatusCode >= 200 && resp.StatusCode < 600 { - status = resp.StatusCode - } - w.Header().Add("Content-Length", strconv.Itoa(len(resp.Message.Body))) - w.WriteHeader(status) - - if len(resp.Message.Body) > 0 { - if _, err := w.Write(resp.Message.Body); err != nil { - r.Error() - return - } - } - - r.OK() - return - } - - w.WriteHeader(http.StatusNoContent) - r.OK() -} - -// GetPort returns the listening port. -// Returns -1 if there is a listening error. -// Note this will call net.Listen() if the listener is not already started. -func (t *Transport) GetPort() int { - // Ensure we have a listener and therefore a port. - if _, err := t.listen(); err == nil || t.Port != nil { - return *t.Port - } - return -1 -} - -func (t *Transport) setPort(port int) { - if t.Port == nil { - t.Port = new(int) - } - *t.Port = port -} - -// listen if not already listening, update t.Port -func (t *Transport) listen() (net.Addr, error) { - if t.listener == nil { - port := 8080 - if t.Port != nil { - port = *t.Port - if port < 0 || port > 65535 { - return nil, fmt.Errorf("invalid port %d", port) - } - } - var err error - if t.listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port)); err != nil { - return nil, err - } - } - addr := t.listener.Addr() - if tcpAddr, ok := addr.(*net.TCPAddr); ok { - t.setPort(tcpAddr.Port) - } - return addr, nil -} - -// GetPath returns the path the transport is hosted on. If the path is '/', -// the transport will handle requests on any URI. To discover the true path -// a request was received on, inspect the context from Receive(cxt, ...) with -// TransportContextFrom(ctx). -func (t *Transport) GetPath() string { - path := strings.TrimSpace(t.Path) - if len(path) > 0 { - return path - } - return "/" // default -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/message.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/message.go deleted file mode 100644 index e2ed55c970..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/message.go +++ /dev/null @@ -1,9 +0,0 @@ -package transport - -// Message is the abstract transport message wrapper. -type Message interface { - // CloudEventsVersion returns the version of the CloudEvent. 
- CloudEventsVersion() string - - // TODO maybe get encoding -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/transport.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/transport.go deleted file mode 100644 index a08d5a12e5..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/transport.go +++ /dev/null @@ -1,44 +0,0 @@ -package transport - -import ( - "context" - - "github.com/cloudevents/sdk-go/pkg/cloudevents" -) - -// Transport is the interface for transport sender to send the converted Message -// over the underlying transport. -type Transport interface { - Send(context.Context, cloudevents.Event) (context.Context, *cloudevents.Event, error) - - SetReceiver(Receiver) - StartReceiver(context.Context) error - - // SetConverter sets the delegate to use for converting messages that have - // failed to be decoded from known codecs for this transport. - SetConverter(Converter) - // HasConverter is true when a non-nil converter has been set. - HasConverter() bool -} - -// Receiver is an interface to define how a transport will invoke a listener -// of incoming events. -type Receiver interface { - Receive(context.Context, cloudevents.Event, *cloudevents.EventResponse) error -} - -// ReceiveFunc wraps a function as a Receiver object. -type ReceiveFunc func(ctx context.Context, e cloudevents.Event, er *cloudevents.EventResponse) error - -// Receive implements Receiver.Receive -func (f ReceiveFunc) Receive(ctx context.Context, e cloudevents.Event, er *cloudevents.EventResponse) error { - return f(ctx, e, er) -} - -// Converter is an interface to define how a transport delegate to convert an -// non-understood transport message from the internal codecs. Providing a -// Converter allows incoming requests to be bridged to CloudEvents format if -// they have not been sent as an event in CloudEvents format. -type Converter interface { - Convert(context.Context, Message, error) (*cloudevents.Event, error) -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/urlref.go b/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/urlref.go deleted file mode 100644 index 2578801cd8..0000000000 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/urlref.go +++ /dev/null @@ -1,79 +0,0 @@ -package types - -import ( - "encoding/json" - "encoding/xml" - "fmt" - "net/url" -) - -// URLRef is a wrapper to url.URL. It is intended to enforce compliance with -// the CloudEvents spec for their definition of URI-Reference. Custom -// marshal methods are implemented to ensure the outbound URLRef object is -// is a flat string. -// -// deprecated: use URIRef. -type URLRef struct { - url.URL -} - -// ParseURLRef attempts to parse the given string as a URI-Reference. -func ParseURLRef(u string) *URLRef { - if u == "" { - return nil - } - pu, err := url.Parse(u) - if err != nil { - return nil - } - return &URLRef{URL: *pu} -} - -// MarshalJSON implements a custom json marshal method used when this type is -// marshaled using json.Marshal. -func (u URLRef) MarshalJSON() ([]byte, error) { - b := fmt.Sprintf("%q", u.String()) - return []byte(b), nil -} - -// UnmarshalJSON implements the json unmarshal method used when this type is -// unmarshaled using json.Unmarshal. 
-func (u *URLRef) UnmarshalJSON(b []byte) error { - var ref string - if err := json.Unmarshal(b, &ref); err != nil { - return err - } - r := ParseURLRef(ref) - if r != nil { - *u = *r - } - return nil -} - -// MarshalXML implements a custom xml marshal method used when this type is -// marshaled using xml.Marshal. -func (u URLRef) MarshalXML(e *xml.Encoder, start xml.StartElement) error { - return e.EncodeElement(u.String(), start) -} - -// UnmarshalXML implements the xml unmarshal method used when this type is -// unmarshaled using xml.Unmarshal. -func (u *URLRef) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { - var ref string - if err := d.DecodeElement(&ref, &start); err != nil { - return err - } - r := ParseURLRef(ref) - if r != nil { - *u = *r - } - return nil -} - -// String returns the full string representation of the URI-Reference. -func (u *URLRef) String() string { - if u == nil { - return "" - } - return u.URL.String() -} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/alias.go b/vendor/github.com/cloudevents/sdk-go/v2/alias.go new file mode 100644 index 0000000000..4945c608d3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/alias.go @@ -0,0 +1,161 @@ +package v2 + +// Package cloudevents alias' common functions and types to improve discoverability and reduce +// the number of imports for simple HTTP clients. + +import ( + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/client" + "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/observability" + "github.com/cloudevents/sdk-go/v2/protocol" + "github.com/cloudevents/sdk-go/v2/protocol/http" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Client + +type ClientOption client.Option +type Client = client.Client + +// Event + +type Event = event.Event +type Result = protocol.Result + +// Context + +type EventContext = event.EventContext +type EventContextV1 = event.EventContextV1 +type EventContextV03 = event.EventContextV03 + +// Custom Types + +type Timestamp = types.Timestamp +type URIRef = types.URIRef + +// HTTP Protocol + +type HTTPOption http.Option + +type HTTPProtocol = http.Protocol + +// Encoding + +type Encoding = binding.Encoding + +// Message + +type Message = binding.Message + +const ( + // ReadEncoding + + ApplicationXML = event.ApplicationXML + ApplicationJSON = event.ApplicationJSON + TextPlain = event.TextPlain + ApplicationCloudEventsJSON = event.ApplicationCloudEventsJSON + ApplicationCloudEventsBatchJSON = event.ApplicationCloudEventsBatchJSON + Base64 = event.Base64 + + // Event Versions + + VersionV1 = event.CloudEventsVersionV1 + VersionV03 = event.CloudEventsVersionV03 + + // Encoding + + EncodingBinary = binding.EncodingBinary + EncodingStructured = binding.EncodingStructured +) + +var ( + + // ContentType Helpers + + StringOfApplicationJSON = event.StringOfApplicationJSON + StringOfApplicationXML = event.StringOfApplicationXML + StringOfTextPlain = event.StringOfTextPlain + StringOfApplicationCloudEventsJSON = event.StringOfApplicationCloudEventsJSON + StringOfApplicationCloudEventsBatchJSON = event.StringOfApplicationCloudEventsBatchJSON + StringOfBase64 = event.StringOfBase64 + + // Client Creation + + NewClient = client.New + NewClientObserved = client.NewObserved + NewDefaultClient = client.NewDefault + NewHTTPReceiveHandler = client.NewHTTPReceiveHandler + + // Client Options + + 
WithEventDefaulter = client.WithEventDefaulter + WithUUIDs = client.WithUUIDs + WithTimeNow = client.WithTimeNow + WithTracePropagation = client.WithTracePropagation() + + // Results + + ResultIs = protocol.ResultIs + ResultAs = protocol.ResultAs + + // Receipt helpers + + NewReceipt = protocol.NewReceipt + + ResultACK = protocol.ResultACK + ResultNACK = protocol.ResultNACK + + IsACK = protocol.IsACK + IsNACK = protocol.IsNACK + + // Event Creation + + NewEvent = event.New + NewResult = protocol.NewResult + + NewHTTPResult = http.NewResult + + // Message Creation + + ToMessage = binding.ToMessage + + // HTTP Messages + + WriteHTTPRequest = http.WriteRequest + + // Tracing + + EnableTracing = observability.EnableTracing + + // Context + + ContextWithTarget = context.WithTarget + TargetFromContext = context.TargetFrom + WithEncodingBinary = binding.WithForceBinary + WithEncodingStructured = binding.WithForceStructured + + // Custom Types + + ParseTimestamp = types.ParseTimestamp + ParseURIRef = types.ParseURIRef + ParseURI = types.ParseURI + + // HTTP Protocol + + NewHTTP = http.New + + // HTTP Protocol Options + + WithTarget = http.WithTarget + WithHeader = http.WithHeader + WithShutdownTimeout = http.WithShutdownTimeout + //WithEncoding = http.WithEncoding + //WithStructuredEncoding = http.WithStructuredEncoding // TODO: expose new way + WithPort = http.WithPort + WithPath = http.WithPath + WithMiddleware = http.WithMiddleware + WithListener = http.WithListener + WithRoundTripper = http.WithRoundTripper +) diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go new file mode 100644 index 0000000000..5d2fafe5d6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/binary_writer.go @@ -0,0 +1,39 @@ +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +// BinaryWriter is used to visit a binary Message and generate a new representation. +// +// Protocols that supports binary encoding should implement this interface to implement direct +// binary to binary encoding and event to binary encoding. +// +// Start() and End() methods are invoked every time this BinaryWriter implementation is used to visit a Message +type BinaryWriter interface { + // Method invoked at the beginning of the visit. Useful to perform initial memory allocations + Start(ctx context.Context) error + + // Set a standard attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding. See package types to perform the needed conversions + SetAttribute(attribute spec.Attribute, value interface{}) error + + // Set an extension attribute. + // + // The value can either be the correct golang type for the attribute, or a canonical + // string encoding. See package types to perform the needed conversions + SetExtension(name string, value interface{}) error + + // SetData receives an io.Reader for the data attribute. + // io.Reader is not invoked when the data attribute is empty + SetData(data io.Reader) error + + // End method is invoked only after the whole encoding process ends successfully. + // If it fails, it's never invoked. It can be used to finalize the message. 
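The `v2/alias.go` file added above re-exports the most commonly used v2 APIs under the top-level `cloudevents` package, which is what this repo now imports (`github.com/cloudevents/sdk-go/v2`). As a rough, hedged sketch of how the simplified surface is meant to be used (the target URL, event type, source, and payload below are placeholders, not taken from this change, and the exact `Send` return handling may differ slightly in this preview release), a sender could look like:

```go
package main

import (
	"context"
	"log"

	cloudevents "github.com/cloudevents/sdk-go/v2"
)

func main() {
	// NewDefaultClient wires up the HTTP protocol with default options.
	c, err := cloudevents.NewDefaultClient()
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}

	// Build an event using the aliased constructors and setters.
	e := cloudevents.NewEvent()
	e.SetID("example-id")             // placeholder values, not from this diff
	e.SetType("example.type")
	e.SetSource("example/source")
	_ = e.SetData(cloudevents.ApplicationJSON, map[string]string{"hello": "world"})

	// ContextWithTarget tells the HTTP protocol where to POST the event.
	ctx := cloudevents.ContextWithTarget(context.Background(), "http://localhost:8080/")
	if result := c.Send(ctx, e); !cloudevents.IsACK(result) {
		log.Printf("failed to send event: %v", result)
	}
}
```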
+ End(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go new file mode 100644 index 0000000000..e69902d0e9 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/doc.go @@ -0,0 +1,64 @@ +package binding + +/* + +Package binding defines interfaces for protocol bindings. + +NOTE: Most applications that emit or consume events should use the ../client +package, which provides a simpler API to the underlying binding. + +The interfaces in this package provide extra encoding and protocol information +to allow efficient forwarding and end-to-end reliable delivery between a +Receiver and a Sender belonging to different bindings. This is useful for +intermediary applications that route or forward events, but not necessary for +most "endpoint" applications that emit or consume events. + +Protocol Bindings + +A protocol binding usually implements a Message, a Sender and Receiver, a StructuredWriter and a BinaryWriter (depending on the supported encodings of the protocol) and an Write[ProtocolMessage] method. + +Read and write events + +The core of this package is the binding.Message interface. +Through binding.MessageReader It defines how to read a protocol specific message for an +encoded event in structured mode or binary mode. +The entity who receives a protocol specific data structure representing a message +(e.g. an HttpRequest) encapsulates it in a binding.Message implementation using a NewMessage method (e.g. http.NewMessage). +Then the entity that wants to send the binding.Message back on the wire, +translates it back to the protocol specific data structure (e.g. a Kafka ConsumerMessage), using +the writers BinaryWriter and StructuredWriter specific to that protocol. +Binding implementations exposes their writers +through a specific Write[ProtocolMessage] function (e.g. kafka.EncodeProducerMessage), +in order to simplify the encoding process. + +The encoding process can be customized in order to mutate the final result with binding.TransformerFactory. +A bunch of these are provided directly by the binding/transformer module. + +Usually binding.Message implementations can be encoded only one time, because the encoding process drain the message itself. +In order to consume a message several times, the binding/buffering module provides several APIs to buffer the Message. + +A message can be converted to an event.Event using binding.ToEvent() method. +An event.Event can be used as Message casting it to binding.EventMessage. + +In order to simplify the encoding process for each protocol, this package provide several utility methods like binding.Write and binding.DirectWrite. +The binding.Write method tries to preserve the structured/binary encoding, in order to be as much efficient as possible. + +Messages can be eventually wrapped to change their behaviours and binding their lifecycle, like the binding.FinishMessage. +Every Message wrapper implements the MessageWrapper interface + +Sender and Receiver + +A Receiver receives protocol specific messages and wraps them to into binding.Message implementations. + +A Sender converts arbitrary Message implementations to a protocol-specific form using the protocol specific Write method +and sends them. + +Message and ExactlyOnceMessage provide methods to allow acknowledgments to +propagate when a reliable messages is forwarded from a Receiver to a Sender. 
+QoS 0 (unreliable), 1 (at-least-once) and 2 (exactly-once) are supported. + +Transport + +A binding implementation providing Sender and Receiver implementations can be used as a Transport through the BindingTransport adapter. + +*/ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go new file mode 100644 index 0000000000..b5d3e3a1ca --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/encoding.go @@ -0,0 +1,26 @@ +package binding + +import "errors" + +// Encoding enum specifies the type of encodings supported by binding interfaces +type Encoding int + +const ( + // Binary encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + EncodingBinary Encoding = iota + // Structured encoding as specified in https://github.com/cloudevents/spec/blob/master/spec.md#message + EncodingStructured + // Message is an instance of EventMessage or it contains EventMessage nested (through MessageWrapper) + EncodingEvent + // When the encoding is unknown (which means that the message is a non-event) + EncodingUnknown +) + +// Error to specify that or the Message is not an event or it is encoded with an unknown encoding +var ErrUnknownEncoding = errors.New("unknown Message encoding") + +// ErrNotStructured returned by Message.Structured for non-structured messages. +var ErrNotStructured = errors.New("message is not in structured mode") + +// ErrNotBinary returned by Message.Binary for non-binary messages. +var ErrNotBinary = errors.New("message is not in binary mode") diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go new file mode 100644 index 0000000000..0d1a3b4d00 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/event_message.go @@ -0,0 +1,90 @@ +package binding + +import ( + "bytes" + "context" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" +) + +const ( + FORMAT_EVENT_STRUCTURED = "FORMAT_EVENT_STRUCTURED" +) + +// EventMessage type-converts a event.Event object to implement Message. +// This allows local event.Event objects to be sent directly via Sender.Send() +// s.Send(ctx, binding.EventMessage(e)) +// When an event is wrapped into a EventMessage, the original event could be +// potentially mutated. 
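`binding.EventMessage` (below) wraps a local `event.Event` so it can flow through the binding interfaces, and `binding.ToEvent` (added later in this change) converts any readable `Message` back into an event. A minimal round-trip sketch using only APIs visible in this diff; the ID, type, and source values are illustrative placeholders:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	e := event.New()
	e.SetID("42")
	e.SetType("example.type")
	e.SetSource("example/source")

	// Wrap the event as a Message; its encoding is EncodingEvent.
	msg := binding.ToMessage(&e)
	fmt.Println(msg.ReadEncoding() == binding.EncodingEvent) // true

	// Convert the Message back into an event.Event.
	out, err := binding.ToEvent(context.Background(), msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(out.Context.GetID()) // "42"
}
```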
If you need to use the Event again, after wrapping it into +// an Event message, you should copy it before +type EventMessage event.Event + +func ToMessage(e *event.Event) Message { + return (*EventMessage)(e) +} + +func (m *EventMessage) ReadEncoding() Encoding { + return EncodingEvent +} + +func (m *EventMessage) ReadStructured(ctx context.Context, builder StructuredWriter) error { + f := GetOrDefaultFromCtx(ctx, FORMAT_EVENT_STRUCTURED, format.JSON).(format.Format) + b, err := f.Marshal((*event.Event)(m)) + if err != nil { + return err + } + return builder.SetStructuredEvent(ctx, f, bytes.NewReader(b)) +} + +func (m *EventMessage) ReadBinary(ctx context.Context, b BinaryWriter) (err error) { + err = b.Start(ctx) + if err != nil { + return err + } + err = eventContextToBinaryWriter(m.Context, b) + if err != nil { + return err + } + // Pass the body + body := (*event.Event)(m).Data() + if len(body) > 0 { + err = b.SetData(bytes.NewReader(body)) + if err != nil { + return err + } + } + return b.End(ctx) +} + +func eventContextToBinaryWriter(c event.EventContext, b BinaryWriter) (err error) { + // Pass all attributes + sv := spec.VS.Version(c.GetSpecVersion()) + for _, a := range sv.Attributes() { + value := a.Get(c) + if value != nil { + err = b.SetAttribute(a, value) + } + if err != nil { + return err + } + } + // Pass all extensions + for k, v := range c.GetExtensions() { + err = b.SetExtension(k, v) + if err != nil { + return err + } + } + return nil +} + +func (*EventMessage) Finish(error) error { return nil } + +var _ Message = (*EventMessage)(nil) // Test it conforms to the interface + +// Configure which format to use when marshalling the event to structured mode +func UseFormatForEvent(ctx context.Context, f format.Format) context.Context { + return context.WithValue(ctx, FORMAT_EVENT_STRUCTURED, f) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go new file mode 100644 index 0000000000..3c4efc5c0a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/finish_message.go @@ -0,0 +1,27 @@ +package binding + +type finishMessage struct { + Message + finish func(error) +} + +func (m *finishMessage) GetWrappedMessage() Message { + return m.Message +} + +func (m *finishMessage) Finish(err error) error { + err2 := m.Message.Finish(err) // Finish original message first + if m.finish != nil { + m.finish(err) // Notify callback + } + return err2 +} + +var _ MessageWrapper = (*finishMessage)(nil) + +// WithFinish returns a wrapper for m that calls finish() and +// m.Finish() in its Finish(). +// Allows code to be notified when a message is Finished. +func WithFinish(m Message, finish func(error)) Message { + return &finishMessage{Message: m, finish: finish} +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go new file mode 100644 index 0000000000..61058dfadc --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/doc.go @@ -0,0 +1,8 @@ +package format + +/* +Package format formats structured events. + +The "application/cloudevents+json" format is built-in and always +available. Other formats may be added. 
+*/ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go new file mode 100644 index 0000000000..2f275c9833 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/format/format.go @@ -0,0 +1,71 @@ +package format + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Format marshals and unmarshals structured events to bytes. +type Format interface { + // MediaType identifies the format + MediaType() string + // Marshal event to bytes + Marshal(*event.Event) ([]byte, error) + // Unmarshal bytes to event + Unmarshal([]byte, *event.Event) error +} + +// Prefix for event-format media types. +const Prefix = "application/cloudevents" + +// IsFormat returns true if mediaType begins with "application/cloudevents" +func IsFormat(mediaType string) bool { return strings.HasPrefix(mediaType, Prefix) } + +// JSON is the built-in "application/cloudevents+json" format. +var JSON = jsonFmt{} + +type jsonFmt struct{} + +func (jsonFmt) MediaType() string { return event.ApplicationCloudEventsJSON } + +func (jsonFmt) Marshal(e *event.Event) ([]byte, error) { return json.Marshal(e) } +func (jsonFmt) Unmarshal(b []byte, e *event.Event) error { + return json.Unmarshal(b, e) +} + +// built-in formats +var formats map[string]Format + +func init() { + formats = map[string]Format{} + Add(JSON) +} + +// Lookup returns the format for mediaType, or nil if not found. +func Lookup(mediaType string) Format { return formats[mediaType] } + +func unknown(mediaType string) error { + return fmt.Errorf("unknown event format media-type %#v", mediaType) +} + +// Add a new Format. It can be retrieved by Lookup(f.MediaType()) +func Add(f Format) { formats[f.MediaType()] = f } + +// Marshal an event to bytes using the mediaType event format. +func Marshal(mediaType string, e *event.Event) ([]byte, error) { + if f := formats[mediaType]; f != nil { + return f.Marshal(e) + } + return nil, unknown(mediaType) +} + +// Unmarshal bytes to an event using the mediaType event format. +func Unmarshal(mediaType string, b []byte, e *event.Event) error { + if f := formats[mediaType]; f != nil { + return f.Unmarshal(b, e) + } + return unknown(mediaType) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go new file mode 100644 index 0000000000..e37cd3ca1d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/message.go @@ -0,0 +1,99 @@ +package binding + +import "context" + +// The ReadStructured and ReadBinary methods allows to perform an optimized encoding of a Message to a specific data structure. +// A Sender should try each method of interest and fall back to binding.ToEvent() if none are supported. +// An out of the box algorithm is provided for writing a message: binding.Write(). +type MessageReader interface { + // Return the type of the message Encoding. + // The encoding should be preferably computed when the message is constructed. + ReadEncoding() Encoding + + // ReadStructured transfers a structured-mode event to a StructuredWriter. + // It must return ErrNotStructured if message is not in structured mode. + // + // Returns a different err if something wrong happened while trying to read the structured event. + // In this case, the caller must Finish the message with appropriate error. 
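The `format` package registers the JSON event format at init time, and `Marshal`/`Unmarshal` dispatch on the media type string. A short sketch of exercising it (the event field values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/v2/binding/format"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	e := event.New()
	e.SetID("1")
	e.SetType("example.type")
	e.SetSource("example/source")

	// Marshal using the built-in "application/cloudevents+json" format.
	b, err := format.Marshal(event.ApplicationCloudEventsJSON, &e)
	if err != nil {
		panic(err)
	}

	// Unmarshal back into a fresh event.
	var out event.Event
	if err := format.Unmarshal(event.ApplicationCloudEventsJSON, b, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.Context.GetID())                          // "1"
	fmt.Println(format.IsFormat(event.ApplicationCloudEventsJSON)) // true
}
```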
+ // + // This allows Senders to avoid re-encoding messages that are + // already in suitable structured form. + ReadStructured(context.Context, StructuredWriter) error + + // ReadBinary transfers a binary-mode event to an BinaryWriter. + // It must return ErrNotBinary if message is not in binary mode. + // + // Returns a different err if something wrong happened while trying to read the binary event + // In this case, the caller must Finish the message with appropriate error + // + // This allows Senders to avoid re-encoding messages that are + // already in suitable binary form. + ReadBinary(context.Context, BinaryWriter) error +} + +// Message is the interface to a binding-specific message containing an event. +// +// Reliable Delivery +// +// There are 3 reliable qualities of service for messages: +// +// 0/at-most-once/unreliable: messages can be dropped silently. +// +// 1/at-least-once: messages are not dropped without signaling an error +// to the sender, but they may be duplicated in the event of a re-send. +// +// 2/exactly-once: messages are never dropped (without error) or +// duplicated, as long as both sending and receiving ends maintain +// some binding-specific delivery state. Whether this is persisted +// depends on the configuration of the binding implementations. +// +// The Message interface supports QoS 0 and 1, the ExactlyOnceMessage interface +// supports QoS 2 +// +// Message includes the MessageReader interface to read messages. Every binding.Message implementation *must* specify if the message can be accessed one or more times. +// +// When a Message can be forgotten by the entity who produced the message, Message.Finish() *must* be invoked. +type Message interface { + MessageReader + + // Finish *must* be called when message from a Receiver can be forgotten by + // the receiver. A QoS 1 sender should not call Finish() until it gets an acknowledgment of + // receipt on the underlying transport. For QoS 2 see ExactlyOnceMessage. + // + // Note that, depending on the Message implementation, forgetting to Finish the message + // could produce memory/resources leaks! + // + // Passing a non-nil err indicates sending or processing failed. + // A non-nil return indicates that the message was not accepted + // by the receivers peer. + Finish(error) error +} + +// ExactlyOnceMessage is implemented by received Messages +// that support QoS 2. Only transports that support QoS 2 need to +// implement or use this interface. +type ExactlyOnceMessage interface { + Message + + // Received is called by a forwarding QoS2 Sender when it gets + // acknowledgment of receipt (e.g. AMQP 'accept' or MQTT PUBREC) + // + // The receiver must call settle(nil) when it get's the ack-of-ack + // (e.g. AMQP 'settle' or MQTT PUBCOMP) or settle(err) if the + // transfer fails. + // + // Finally the Sender calls Finish() to indicate the message can be + // discarded. + // + // If sending fails, or if the sender does not support QoS 2, then + // Finish() may be called without any call to Received() + Received(settle func(error)) +} + +// Message Wrapper interface is used to walk through a decorated Message and unwrap it. 
+type MessageWrapper interface { + Message + + // Method to get the wrapped message + GetWrappedMessage() Message +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go new file mode 100644 index 0000000000..20ec1ce92f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/attributes.go @@ -0,0 +1,136 @@ +package spec + +import ( + "fmt" + "time" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/cloudevents/sdk-go/v2/types" +) + +// Kind is a version-independent identifier for a CloudEvent context attribute. +type Kind uint8 + +const ( + // Required cloudevents attributes + ID Kind = iota + Source + SpecVersion + Type + // Optional cloudevents attributes + DataContentType + DataSchema + Subject + Time +) +const nAttrs = int(Time) + 1 + +var kindNames = [nAttrs]string{ + "id", + "source", + "specversion", + "type", + "datacontenttype", + "dataschema", + "subject", + "time", +} + +// String is a human-readable string, for a valid attribute name use Attribute.Name +func (k Kind) String() string { return kindNames[k] } + +// IsRequired returns true for attributes defined as "required" by the CE spec. +func (k Kind) IsRequired() bool { return k < DataContentType } + +// Attribute is a named attribute accessor. +// The attribute name is specific to a Version. +type Attribute interface { + Kind() Kind + // Name of the attribute with respect to the current spec Version() with prefix + PrefixedName() string + // Name of the attribute with respect to the current spec Version() + Name() string + // Version of the spec that this attribute belongs to + Version() Version + // Get the value of this attribute from an event context + Get(event.EventContextReader) interface{} + // Set the value of this attribute on an event context + Set(event.EventContextWriter, interface{}) error + // Delete this attribute from and event context, when possible + Delete(event.EventContextWriter) error +} + +// accessor provides Kind, Get, Set. 
+type accessor interface { + Kind() Kind + Get(event.EventContextReader) interface{} + Set(event.EventContextWriter, interface{}) error + Delete(event.EventContextWriter) error +} + +var acc = [nAttrs]accessor{ + &aStr{aKind(ID), event.EventContextReader.GetID, event.EventContextWriter.SetID}, + &aStr{aKind(Source), event.EventContextReader.GetSource, event.EventContextWriter.SetSource}, + &aStr{aKind(SpecVersion), event.EventContextReader.GetSpecVersion, func(writer event.EventContextWriter, s string) error { return nil }}, + &aStr{aKind(Type), event.EventContextReader.GetType, event.EventContextWriter.SetType}, + &aStr{aKind(DataContentType), event.EventContextReader.GetDataContentType, event.EventContextWriter.SetDataContentType}, + &aStr{aKind(DataSchema), event.EventContextReader.GetDataSchema, event.EventContextWriter.SetDataSchema}, + &aStr{aKind(Subject), event.EventContextReader.GetSubject, event.EventContextWriter.SetSubject}, + &aTime{aKind(Time), event.EventContextReader.GetTime, event.EventContextWriter.SetTime}, +} + +// aKind implements Kind() +type aKind Kind + +func (kind aKind) Kind() Kind { return Kind(kind) } + +type aStr struct { + aKind + get func(event.EventContextReader) string + set func(event.EventContextWriter, string) error +} + +func (a *aStr) Get(c event.EventContextReader) interface{} { + if s := a.get(c); s != "" { + return s + } + return nil // Treat blank as missing +} + +func (a *aStr) Set(c event.EventContextWriter, v interface{}) error { + s, err := types.ToString(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, s) +} + +func (a *aStr) Delete(c event.EventContextWriter) error { + return a.set(c, "") +} + +type aTime struct { + aKind + get func(event.EventContextReader) time.Time + set func(event.EventContextWriter, time.Time) error +} + +func (a *aTime) Get(c event.EventContextReader) interface{} { + if v := a.get(c); !v.IsZero() { + return v + } + return nil // Treat zero time as missing. +} + +func (a *aTime) Set(c event.EventContextWriter, v interface{}) error { + t, err := types.ToTime(v) + if err != nil { + return fmt.Errorf("invalid value for %s: %#v", a.Kind(), v) + } + return a.set(c, t) +} + +func (a *aTime) Delete(c event.EventContextWriter) error { + return a.set(c, time.Time{}) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go new file mode 100644 index 0000000000..618098194d --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/doc.go @@ -0,0 +1,9 @@ +package spec + +/* +Package spec provides spec-version metadata. + +For use by code that maps events using (prefixed) attribute name strings. +Supports handling multiple spec versions uniformly. + +*/ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go new file mode 100644 index 0000000000..4de589185e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/spec/spec.go @@ -0,0 +1,184 @@ +package spec + +import ( + "strings" + + "github.com/cloudevents/sdk-go/v2/event" +) + +// Version provides meta-data for a single spec-version. +type Version interface { + // String name of the version, e.g. "1.0" + String() string + // Prefix for attribute names. + Prefix() string + // Attribute looks up a prefixed attribute name (case insensitive). + // Returns nil if not found. 
+ Attribute(prefixedName string) Attribute + // Attribute looks up the attribute from kind. + // Returns nil if not found. + AttributeFromKind(kind Kind) Attribute + // Attributes returns all the context attributes for this version. + Attributes() []Attribute + // Convert translates a context to this version. + Convert(event.EventContextConverter) event.EventContext + // NewContext returns a new context for this version. + NewContext() event.EventContext + // SetAttribute sets named attribute to value. + // + // Name is case insensitive. + // Does nothing if name does not start with prefix. + SetAttribute(context event.EventContextWriter, name string, value interface{}) error +} + +// Versions contains all known versions with the same attribute prefix. +type Versions struct { + prefix string + all []Version + m map[string]Version +} + +// Versions returns the list of all known versions, most recent first. +func (vs *Versions) Versions() []Version { return vs.all } + +// Version returns the named version. +func (vs *Versions) Version(name string) Version { + return vs.m[name] +} + +// Latest returns the latest Version +func (vs *Versions) Latest() Version { return vs.all[0] } + +// PrefixedSpecVersionName returns the specversion attribute PrefixedName +func (vs *Versions) PrefixedSpecVersionName() string { return vs.prefix + "specversion" } + +// Prefix is the lowercase attribute name prefix. +func (vs *Versions) Prefix() string { return vs.prefix } + +type attribute struct { + accessor + name string + version Version +} + +func (a *attribute) PrefixedName() string { return a.version.Prefix() + a.name } +func (a *attribute) Name() string { return a.name } +func (a *attribute) Version() Version { return a.version } + +type version struct { + prefix string + context event.EventContext + convert func(event.EventContextConverter) event.EventContext + attrMap map[string]Attribute + attrs []Attribute +} + +func (v *version) Attribute(name string) Attribute { return v.attrMap[strings.ToLower(name)] } +func (v *version) Attributes() []Attribute { return v.attrs } +func (v *version) String() string { return v.context.GetSpecVersion() } +func (v *version) Prefix() string { return v.prefix } +func (v *version) NewContext() event.EventContext { return v.context.Clone() } + +// HasPrefix is a case-insensitive prefix check. 
+func (v *version) HasPrefix(name string) bool { + return strings.HasPrefix(strings.ToLower(name), v.prefix) +} + +func (v *version) Convert(c event.EventContextConverter) event.EventContext { return v.convert(c) } + +func (v *version) SetAttribute(c event.EventContextWriter, name string, value interface{}) error { + if a := v.Attribute(name); a != nil { // Standard attribute + return a.Set(c, value) + } + name = strings.ToLower(name) + var err error + if v.HasPrefix(name) { // Extension attribute + return c.SetExtension(strings.TrimPrefix(name, v.prefix), value) + } + return err +} + +func (v *version) AttributeFromKind(kind Kind) Attribute { + for _, a := range v.Attributes() { + if a.Kind() == kind { + return a + } + } + return nil +} + +func newVersion( + prefix string, + context event.EventContext, + convert func(event.EventContextConverter) event.EventContext, + attrs ...*attribute, +) *version { + v := &version{ + prefix: strings.ToLower(prefix), + context: context, + convert: convert, + attrMap: map[string]Attribute{}, + attrs: make([]Attribute, len(attrs)), + } + for i, a := range attrs { + a.version = v + v.attrs[i] = a + v.attrMap[strings.ToLower(a.PrefixedName())] = a + } + return v +} + +// WithPrefix returns a set of versions with prefix added to all attribute names. +func WithPrefix(prefix string) *Versions { + attr := func(name string, kind Kind) *attribute { + return &attribute{accessor: acc[kind], name: name} + } + vs := &Versions{ + m: map[string]Version{}, + prefix: prefix, + all: []Version{ + newVersion(prefix, event.EventContextV1{}.AsV1(), + func(c event.EventContextConverter) event.EventContext { return c.AsV1() }, + attr("id", ID), + attr("source", Source), + attr("specversion", SpecVersion), + attr("type", Type), + attr("datacontenttype", DataContentType), + attr("dataschema", DataSchema), + attr("subject", Subject), + attr("time", Time), + ), + newVersion(prefix, event.EventContextV03{}.AsV03(), + func(c event.EventContextConverter) event.EventContext { return c.AsV03() }, + attr("specversion", SpecVersion), + attr("type", Type), + attr("source", Source), + attr("schemaurl", DataSchema), + attr("subject", Subject), + attr("id", ID), + attr("time", Time), + attr("datacontenttype", DataContentType), + ), + }, + } + for _, v := range vs.all { + vs.m[v.String()] = v + } + return vs +} + +// New returns a set of versions +func New() *Versions { return WithPrefix("") } + +// Built-in un-prefixed versions. +var ( + VS *Versions + V03 Version + V1 Version +) + +func init() { + VS = New() + V03 = VS.Version("0.3") + V1 = VS.Version("1.0") +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go new file mode 100644 index 0000000000..8cf2bbe3e3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/structured_writer.go @@ -0,0 +1,17 @@ +package binding + +import ( + "context" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" +) + +// StructuredWriter is used to visit a structured Message and generate a new representation. +// +// Protocols that supports structured encoding should implement this interface to implement direct +// structured to structured encoding and event to structured encoding. +type StructuredWriter interface { + // Event receives an io.Reader for the whole event. 
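The `spec` package above gives version-independent access to context attributes, with `spec.VS` holding the built-in un-prefixed versions. A brief sketch of looking up and setting attributes through it, assuming a freshly constructed event (the attribute values are placeholders):

```go
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/v2/binding/spec"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	e := event.New()

	// Look up the spec metadata matching the event's version (e.g. "1.0").
	v := spec.VS.Version(e.Context.GetSpecVersion())

	// Set the "id" attribute through the version-independent accessor.
	if a := v.AttributeFromKind(spec.ID); a != nil {
		_ = a.Set(e.Context, "example-id")
	}

	// SetAttribute resolves names case-insensitively and falls back to
	// extensions for non-standard attribute names.
	_ = v.SetAttribute(e.Context, "subject", "example-subject")

	fmt.Println(e.Context.GetID()) // "example-id"
}
```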
+ SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go new file mode 100644 index 0000000000..3541633eaf --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/to_event.go @@ -0,0 +1,131 @@ +package binding + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Generic error when a conversion of a Message to an Event fails +var ErrCannotConvertToEvent = errors.New("cannot convert message to event") + +// Translates a Message with a valid Structured or Binary representation to an Event. +// This function returns the Event generated from the Message and the original encoding of the message or +// an error that points the conversion error. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. +func ToEvent(ctx context.Context, message MessageReader, transformers ...TransformerFactory) (*event.Event, error) { + if message == nil { + return nil, nil + } + + messageEncoding := message.ReadEncoding() + if messageEncoding == EncodingEvent { + m := message + for m != nil { + if em, ok := m.(*EventMessage); ok { + e := (*event.Event)(em) + var tf TransformerFactories + tf = transformers + if err := tf.EventTransformer()(e); err != nil { + return nil, err + } + return e, nil + } + if mw, ok := m.(MessageWrapper); ok { + m = mw.GetWrappedMessage() + } else { + break + } + } + return nil, ErrCannotConvertToEvent + } + + e := event.New() + encoder := &messageToEventBuilder{event: &e} + if _, err := DirectWrite( + context.Background(), + message, + encoder, + encoder, + ); err != nil { + return nil, err + } + var tf TransformerFactories + tf = transformers + if err := tf.EventTransformer()(&e); err != nil { + return nil, err + } + return &e, nil +} + +type messageToEventBuilder struct { + event *event.Event +} + +var _ StructuredWriter = (*messageToEventBuilder)(nil) +var _ BinaryWriter = (*messageToEventBuilder)(nil) + +func (b *messageToEventBuilder) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + var buf bytes.Buffer + _, err := io.Copy(&buf, event) + if err != nil { + return err + } + return format.Unmarshal(buf.Bytes(), b.event) +} + +func (b *messageToEventBuilder) Start(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) End(ctx context.Context) error { + return nil +} + +func (b *messageToEventBuilder) SetData(data io.Reader) error { + var buf bytes.Buffer + w, err := io.Copy(&buf, data) + if err != nil { + return err + } + if w != 0 { + b.event.DataEncoded = buf.Bytes() + } + return nil +} + +func (b *messageToEventBuilder) SetAttribute(attribute spec.Attribute, value interface{}) error { + // If spec version we need to change to right context struct + if attribute.Kind() == spec.SpecVersion { + str, err := types.ToString(value) + if err != nil { + return err + } + switch str { + case event.CloudEventsVersionV03: + b.event.Context = b.event.Context.AsV03() + case event.CloudEventsVersionV1: + b.event.Context = b.event.Context.AsV1() + default: + return fmt.Errorf("unrecognized event version %s", str) + } + return nil + } + return 
attribute.Set(b.event.Context, value) +} + +func (b *messageToEventBuilder) SetExtension(name string, value interface{}) error { + value, err := types.Validate(value) + if err != nil { + return err + } + b.event.SetExtension(name, value) + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go new file mode 100644 index 0000000000..2e4b73153c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/transformer.go @@ -0,0 +1,73 @@ +package binding + +import ( + "github.com/cloudevents/sdk-go/v2/event" +) + +// Implements a transformation process while transferring the event from the Message implementation +// to the provided encoder +// +// A transformer could optionally not provide an implementation for binary and/or structured encodings, +// returning nil to the respective factory method. +type TransformerFactory interface { + // Can return nil if the transformation doesn't support structured encoding directly + StructuredTransformer(writer StructuredWriter) StructuredWriter + + // Can return nil if the transformation doesn't support binary encoding directly + BinaryTransformer(writer BinaryWriter) BinaryWriter + + // Can return nil if the transformation doesn't support events + EventTransformer() EventTransformer +} + +// Utility type alias to manage multiple TransformerFactory +type TransformerFactories []TransformerFactory + +func (t TransformerFactories) StructuredTransformer(writer StructuredWriter) StructuredWriter { + if writer == nil { + return nil + } + res := writer + for _, b := range t { + if r := b.StructuredTransformer(res); r != nil { + res = r + } else { + return nil // Structured not supported! + } + } + return res +} + +func (t TransformerFactories) BinaryTransformer(writer BinaryWriter) BinaryWriter { + if writer == nil { + return nil + } + res := writer + for i := range t { + b := t[len(t)-i-1] + if r := b.BinaryTransformer(res); r != nil { + res = r + } else { + return nil // Binary not supported! + } + } + return res +} + +func (t TransformerFactories) EventTransformer() EventTransformer { + return func(e *event.Event) error { + for _, b := range t { + f := b.EventTransformer() + if f != nil { + err := f(e) + if err != nil { + return err + } + } + } + return nil + } +} + +// EventTransformer mutates the provided Event +type EventTransformer func(*event.Event) error diff --git a/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go new file mode 100644 index 0000000000..690d51fc45 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/binding/write.go @@ -0,0 +1,148 @@ +package binding + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/event" +) + +const ( + SKIP_DIRECT_STRUCTURED_ENCODING = "SKIP_DIRECT_STRUCTURED_ENCODING" + SKIP_DIRECT_BINARY_ENCODING = "SKIP_DIRECT_BINARY_ENCODING" + PREFERRED_EVENT_ENCODING = "PREFERRED_EVENT_ENCODING" +) + +// Invokes the encoders. structuredWriter and binaryWriter could be nil if the protocol doesn't support it. +// transformers can be nil and this function guarantees that they are invoked only once during the encoding process. 
+//
+// Returns:
+// * EncodingStructured, nil if message is correctly encoded in structured encoding
+// * EncodingBinary, nil if message is correctly encoded in binary encoding
+// * EncodingStructured, err if message was structured but error happened during the encoding
+// * EncodingBinary, err if message was binary but error happened during the encoding
+// * EncodingUnknown, ErrUnknownEncoding if message is not a structured or a binary Message
+func DirectWrite(
+	ctx context.Context,
+	message MessageReader,
+	structuredWriter StructuredWriter,
+	binaryWriter BinaryWriter,
+	transformers ...TransformerFactory,
+) (Encoding, error) {
+	if structuredWriter != nil && !GetOrDefaultFromCtx(ctx, SKIP_DIRECT_STRUCTURED_ENCODING, false).(bool) {
+		// Wrap the transformers in the structured builder
+		structuredWriter = TransformerFactories(transformers).StructuredTransformer(structuredWriter)
+
+		// StructuredTransformer could return nil if one of the transformers doesn't support
+		// direct structured transcoding
+		if structuredWriter != nil {
+			if err := message.ReadStructured(ctx, structuredWriter); err == nil {
+				return EncodingStructured, nil
+			} else if err != ErrNotStructured {
+				return EncodingStructured, err
+			}
+		}
+	}
+
+	if binaryWriter != nil && !GetOrDefaultFromCtx(ctx, SKIP_DIRECT_BINARY_ENCODING, false).(bool) {
+		binaryWriter = TransformerFactories(transformers).BinaryTransformer(binaryWriter)
+		if binaryWriter != nil {
+			if err := message.ReadBinary(ctx, binaryWriter); err == nil {
+				return EncodingBinary, nil
+			} else if err != ErrNotBinary {
+				return EncodingBinary, err
+			}
+		}
+	}
+
+	return EncodingUnknown, ErrUnknownEncoding
+}
+
+// This is the full algorithm to encode a Message using transformers:
+// 1. It first tries direct encoding using DirectWrite
+// 2. If no direct encoding is possible, it uses ToEvent to generate an Event representation
+// 3. From the Event, the message is encoded back to the provided structured or binary encoders
+// You can tweak the encoding process using the context decorators WithForceStructured, WithForceBinary, etc.
+// transformers can be nil and this function guarantees that they are invoked only once during the encoding process.
+// Returns:
+// * EncodingStructured, nil if message is correctly encoded in structured encoding
+// * EncodingBinary, nil if message is correctly encoded in binary encoding
+// * EncodingUnknown, ErrUnknownEncoding if message.ReadEncoding() == EncodingUnknown
+// * _, err if error happened during the encoding
+func Write(
+	ctx context.Context,
+	message MessageReader,
+	structuredWriter StructuredWriter,
+	binaryWriter BinaryWriter,
+	transformers ...TransformerFactory,
+) (Encoding, error) {
+	enc := message.ReadEncoding()
+	var err error
+	// Skip direct encoding if the event is an event message
+	if enc != EncodingEvent {
+		enc, err = DirectWrite(ctx, message, structuredWriter, binaryWriter, transformers...)
+		if enc != EncodingUnknown {
+			// Message directly encoded, nothing else to do here
+			return enc, err
+		}
+	}
+
+	var e *event.Event
+	e, err = ToEvent(ctx, message, transformers...)
+ if err != nil { + return enc, err + } + + message = (*EventMessage)(e) + + if GetOrDefaultFromCtx(ctx, PREFERRED_EVENT_ENCODING, EncodingBinary).(Encoding) == EncodingStructured { + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + if binaryWriter != nil { + return EncodingBinary, message.ReadBinary(ctx, binaryWriter) + } + } else { + if binaryWriter != nil { + return EncodingBinary, message.ReadBinary(ctx, binaryWriter) + } + if structuredWriter != nil { + return EncodingStructured, message.ReadStructured(ctx, structuredWriter) + } + } + + return EncodingUnknown, ErrUnknownEncoding +} + +// Skip direct structured to structured encoding during the encoding process +func WithSkipDirectStructuredEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, SKIP_DIRECT_STRUCTURED_ENCODING, skip) +} + +// Skip direct binary to binary encoding during the encoding process +func WithSkipDirectBinaryEncoding(ctx context.Context, skip bool) context.Context { + return context.WithValue(ctx, SKIP_DIRECT_BINARY_ENCODING, skip) +} + +// Define the preferred encoding from event to message during the encoding process +func WithPreferredEventEncoding(ctx context.Context, enc Encoding) context.Context { + return context.WithValue(ctx, PREFERRED_EVENT_ENCODING, enc) +} + +// Force structured encoding during the encoding process +func WithForceStructured(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, PREFERRED_EVENT_ENCODING, EncodingStructured), SKIP_DIRECT_BINARY_ENCODING, true) +} + +// Force binary encoding during the encoding process +func WithForceBinary(ctx context.Context) context.Context { + return context.WithValue(context.WithValue(ctx, PREFERRED_EVENT_ENCODING, EncodingBinary), SKIP_DIRECT_STRUCTURED_ENCODING, true) +} + +// Get a configuration value from the provided context +func GetOrDefaultFromCtx(ctx context.Context, key string, def interface{}) interface{} { + if val := ctx.Value(key); val != nil { + return val + } else { + return def + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go new file mode 100644 index 0000000000..7129980317 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client.go @@ -0,0 +1,222 @@ +package client + +import ( + "context" + "errors" + "fmt" + "io" + "sync" + + "go.uber.org/zap" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// Client interface defines the runtime contract the CloudEvents client supports. +type Client interface { + // Send will transmit the given event over the client's configured transport. + Send(ctx context.Context, event event.Event) protocol.Result + + // Request will transmit the given event over the client's configured + // transport and return any response event. + Request(ctx context.Context, event event.Event) (*event.Event, protocol.Result) + + // StartReceiver will register the provided function for callback on receipt + // of a cloudevent. It will also start the underlying protocol as it has + // been configured. + // This call is blocking. 
+ // Valid fn signatures are: + // * func() + // * func() error + // * func(context.Context) + // * func(context.Context) protocol.Result + // * func(event.Event) + // * func(event.Event) protocol.Result + // * func(context.Context, event.Event) + // * func(context.Context, event.Event) protocol.Result + // * func(event.Event) *event.Event + // * func(event.Event) (*event.Event, protocol.Result) + // * func(context.Context, event.Event) *event.Event + // * func(context.Context, event.Event) (*event.Event, protocol.Result) + StartReceiver(ctx context.Context, fn interface{}) error +} + +// New produces a new client with the provided transport object and applied +// client options. +func New(obj interface{}, opts ...Option) (Client, error) { + c := &ceClient{} + + if p, ok := obj.(protocol.Sender); ok { + c.sender = p + } + if p, ok := obj.(protocol.Requester); ok { + c.requester = p + } + if p, ok := obj.(protocol.Responder); ok { + c.responder = p + } + if p, ok := obj.(protocol.Receiver); ok { + c.receiver = p + } + if p, ok := obj.(protocol.Opener); ok { + c.opener = p + } + + if err := c.applyOptions(opts...); err != nil { + return nil, err + } + return c, nil +} + +type ceClient struct { + sender protocol.Sender + requester protocol.Requester + receiver protocol.Receiver + responder protocol.Responder + // Optional. + opener protocol.Opener + + outboundContextDecorators []func(context.Context) context.Context + invoker Invoker + receiverMu sync.Mutex + eventDefaulterFns []EventDefaulter +} + +func (c *ceClient) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +func (c *ceClient) Send(ctx context.Context, e event.Event) protocol.Result { + if c.sender == nil { + return errors.New("sender not set") + } + + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + + if err := e.Validate(); err != nil { + return err + } + + return c.sender.Send(ctx, (*binding.EventMessage)(&e)) +} + +func (c *ceClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + if c.requester == nil { + return nil, errors.New("requester not set") + } + for _, f := range c.outboundContextDecorators { + ctx = f(ctx) + } + + if len(c.eventDefaulterFns) > 0 { + for _, fn := range c.eventDefaulterFns { + e = fn(ctx, e) + } + } + + if err := e.Validate(); err != nil { + return nil, err + } + + // If provided a requester, use it to do request/response. + var resp *event.Event + msg, err := c.requester.Request(ctx, (*binding.EventMessage)(&e)) + if msg != nil { + defer func() { + if err := msg.Finish(err); err != nil { + cecontext.LoggerFrom(ctx).Warnw("failed calling message.Finish", zap.Error(err)) + } + }() + } + + // try to turn msg into an event, it might not work and that is ok. + if rs, rserr := binding.ToEvent(ctx, msg); rserr != nil { + cecontext.LoggerFrom(ctx).Debugw("response: failed calling ToEvent", zap.Error(rserr), zap.Any("resp", msg)) + if err != nil { + err = fmt.Errorf("%w; failed to convert response into event: %s", err, rserr) + } else { + // If the protocol returns no error, it is an ACK on the request, but we had + // issues turning the response into an event, so make an ACK Result and pass + // down the ToEvent error as well. 
+ err = fmt.Errorf("%w; failed to convert response into event: %s", protocol.ResultACK, rserr) + } + } else { + resp = rs + } + + return resp, err +} + +// StartReceiver sets up the given fn to handle Receive. +// See Client.StartReceiver for details. This is a blocking call. +func (c *ceClient) StartReceiver(ctx context.Context, fn interface{}) error { + c.receiverMu.Lock() + defer c.receiverMu.Unlock() + + if c.invoker != nil { + return fmt.Errorf("client already has a receiver") + } + + invoker, err := newReceiveInvoker(fn, c.eventDefaulterFns...) // TODO: this will have to pick between a observed invoker or not. + if err != nil { + return err + } + if invoker.IsReceiver() && c.receiver == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Receiver supported by protocol") + } + if invoker.IsResponder() && c.responder == nil { + return fmt.Errorf("mismatched receiver callback without protocol.Responder supported by protocol") + } + c.invoker = invoker + + defer func() { + c.invoker = nil + }() + + // Start the opener, if set. + if c.opener != nil { + go func() { + // TODO: handle error correctly here. + if err := c.opener.OpenInbound(ctx); err != nil { + panic(err) + } + }() + } + + var msg binding.Message + var respFn protocol.ResponseFn + // Start Polling. + for { + if c.responder != nil { + msg, respFn, err = c.responder.Respond(ctx) + } else if c.receiver != nil { + msg, err = c.receiver.Receive(ctx) + } else { + return errors.New("responder nor receiver set") + } + + if err == io.EOF { // Normal close + return nil + } + + if err := c.invoker.Invoke(ctx, msg, respFn); err != nil { + return err + } + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_default.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_default.go new file mode 100644 index 0000000000..82877d6791 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_default.go @@ -0,0 +1,26 @@ +package client + +import ( + "github.com/cloudevents/sdk-go/v2/protocol/http" +) + +// NewDefault provides the good defaults for the common case using an HTTP +// Protocol client. The http transport has had WithBinaryEncoding http +// transport option applied to it. The client will always send Binary +// encoding but will inspect the outbound event context and match the version. +// The WithTimeNow, and WithUUIDs client options are also applied to the +// client, all outbound events will have a time and id set if not already +// present. +func NewDefault() (Client, error) { + p, err := http.New() + if err != nil { + return nil, err + } + + c, err := NewObserved(p, WithTimeNow(), WithUUIDs()) + if err != nil { + return nil, err + } + + return c, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go new file mode 100644 index 0000000000..edc472ae72 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/client_observed.go @@ -0,0 +1,101 @@ +package client + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/extensions" + "github.com/cloudevents/sdk-go/v2/observability" + "github.com/cloudevents/sdk-go/v2/protocol" + "go.opencensus.io/trace" +) + +// New produces a new client with the provided transport object and applied +// client options. 
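`NewDefault` above is the usual entry point for consumers of this vendored package: it wires an HTTP protocol into an observed client with `WithTimeNow` and `WithUUIDs` applied. The sketch below shows a minimal receiver built only from the APIs shown in this hunk; the `main` scaffolding, log messages, and the choice of the two-argument callback shape are illustrative assumptions, not part of the vendored change.

```go
package main

import (
	"context"
	"log"

	"github.com/cloudevents/sdk-go/v2/client"
	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	// NewDefault builds an HTTP-backed client with WithTimeNow and
	// WithUUIDs already applied, as documented above.
	c, err := client.NewDefault()
	if err != nil {
		log.Fatalf("failed to create client: %v", err)
	}

	// func(context.Context, event.Event) is one of the callback shapes
	// accepted by StartReceiver; the call blocks until the receiver stops.
	if err := c.StartReceiver(context.Background(), func(ctx context.Context, e event.Event) {
		log.Printf("received event id=%s type=%s", e.ID(), e.Type())
	}); err != nil {
		log.Fatalf("receiver stopped: %v", err)
	}
}
```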
+func NewObserved(protocol interface{}, opts ...Option) (Client, error) { + client, err := New(protocol, opts...) + if err != nil { + return nil, err + } + + c := &obsClient{client: client} + + if err := c.applyOptions(opts...); err != nil { + return nil, err + } + return c, nil +} + +type obsClient struct { + client Client + + addTracing bool +} + +func (c *obsClient) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(c); err != nil { + return err + } + } + return nil +} + +// Send transmits the provided event on a preconfigured Protocol. Send returns +// an error if there was an an issue validating the outbound event or the +// transport returns an error. +func (c *obsClient) Send(ctx context.Context, e event.Event) protocol.Result { + ctx, r := observability.NewReporter(ctx, reportSend) + ctx, span := trace.StartSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindClient)) + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes(EventTraceAttributes(&e)...) + } + + if c.addTracing { + e.Context = e.Context.Clone() + extensions.FromSpanContext(span.SpanContext()).AddTracingAttributes(&e) + } + + result := c.client.Send(ctx, e) + + if protocol.IsACK(result) { + r.OK() + } else { + r.Error() + } + return result +} + +func (c *obsClient) Request(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + ctx, r := observability.NewReporter(ctx, reportRequest) + ctx, span := trace.StartSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindClient)) + defer span.End() + if span.IsRecordingEvents() { + span.AddAttributes(EventTraceAttributes(&e)...) + } + + resp, result := c.client.Request(ctx, e) + + if protocol.IsACK(result) { + r.OK() + } else { + r.Error() + } + + return resp, result +} + +// StartReceiver sets up the given fn to handle Receive. +// See Client.StartReceiver for details. This is a blocking call. +func (c *obsClient) StartReceiver(ctx context.Context, fn interface{}) error { + ctx, r := observability.NewReporter(ctx, reportStartReceiver) + + err := c.client.StartReceiver(ctx, fn) + + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go similarity index 50% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go rename to vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go index 40bd85a9cb..5d0d7bc941 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/defaulters.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/defaulters.go @@ -4,17 +4,18 @@ import ( "context" "time" - "github.com/cloudevents/sdk-go/pkg/cloudevents" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/google/uuid" ) // EventDefaulter is the function signature for extensions that are able // to perform event defaulting. -type EventDefaulter func(ctx context.Context, event cloudevents.Event) cloudevents.Event +type EventDefaulter func(ctx context.Context, event event.Event) event.Event // DefaultIDToUUIDIfNotSet will inspect the provided event and assign a UUID to // context.ID if it is found to be empty. 
-func DefaultIDToUUIDIfNotSet(ctx context.Context, event cloudevents.Event) cloudevents.Event { +func DefaultIDToUUIDIfNotSet(ctx context.Context, event event.Event) event.Event { if event.Context != nil { if event.ID() == "" { event.Context = event.Context.Clone() @@ -26,7 +27,7 @@ func DefaultIDToUUIDIfNotSet(ctx context.Context, event cloudevents.Event) cloud // DefaultTimeToNowIfNotSet will inspect the provided event and assign a new // Timestamp to context.Time if it is found to be nil or zero. -func DefaultTimeToNowIfNotSet(ctx context.Context, event cloudevents.Event) cloudevents.Event { +func DefaultTimeToNowIfNotSet(ctx context.Context, event event.Event) event.Event { if event.Context != nil { if event.Time().IsZero() { event.Context = event.Context.Clone() @@ -35,3 +36,17 @@ func DefaultTimeToNowIfNotSet(ctx context.Context, event cloudevents.Event) clou } return event } + +// NewDefaultDataContentTypeIfNotSet returns a defaulter that will inspect the +// provided event and set the provided content type if content type is found +// to be empty. +func NewDefaultDataContentTypeIfNotSet(contentType string) EventDefaulter { + return func(ctx context.Context, event event.Event) event.Event { + if event.Context != nil { + if event.DataContentType() == "" { + event.SetDataContentType(contentType) + } + } + return event + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/client/doc.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/client/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/client/doc.go diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go new file mode 100644 index 0000000000..416e971e69 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/http_receiver.go @@ -0,0 +1,39 @@ +package client + +import ( + "context" + "net/http" + + thttp "github.com/cloudevents/sdk-go/v2/protocol/http" +) + +func NewHTTPReceiveHandler(ctx context.Context, p *thttp.Protocol, fn interface{}) (*EventReceiver, error) { + invoker, err := newReceiveInvoker(fn) + if err != nil { + return nil, err + } + + return &EventReceiver{ + p: p, + invoker: invoker, + }, nil +} + +type EventReceiver struct { + p *thttp.Protocol + invoker Invoker +} + +func (r *EventReceiver) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + go func() { + r.p.ServeHTTP(rw, req) + }() + + ctx := context.Background() + msg, respFn, err := r.p.Respond(ctx) + if err != nil { + // TODO + } else if err := r.invoker.Invoke(ctx, msg, respFn); err != nil { + // TODO + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go new file mode 100644 index 0000000000..9591fc0606 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/invoker.go @@ -0,0 +1,83 @@ +package client + +import ( + "context" + "fmt" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +type Invoker interface { + Invoke(context.Context, binding.Message, protocol.ResponseFn) error + IsReceiver() bool + IsResponder() bool +} + +var _ Invoker = (*receiveInvoker)(nil) + +func newReceiveInvoker(fn interface{}, fns ...EventDefaulter) (Invoker, 
error) { + r := &receiveInvoker{ + eventDefaulterFns: fns, + } + + if fn, err := receiver(fn); err != nil { + return nil, err + } else { + r.fn = fn + } + + return r, nil +} + +type receiveInvoker struct { + fn *receiverFn + eventDefaulterFns []EventDefaulter +} + +func (r *receiveInvoker) Invoke(ctx context.Context, m binding.Message, respFn protocol.ResponseFn) (err error) { + defer func() { + if err2 := m.Finish(err); err2 == nil { + err = err2 + } + }() + + e, err := binding.ToEvent(ctx, m) + if err != nil { + return err + } + + if e != nil && r.fn != nil { + resp, result := r.fn.invoke(ctx, *e) + + // Apply the defaulter chain to the outgoing event. + if resp != nil && len(r.eventDefaulterFns) > 0 { + for _, fn := range r.eventDefaulterFns { + *resp = fn(ctx, *resp) + } + // Validate the event conforms to the CloudEvents Spec. + if verr := resp.Validate(); verr != nil { + cecontext.LoggerFrom(ctx).Error(fmt.Errorf("cloudevent validation failed on response event: %v, %w", verr, err)) + } + } + if respFn != nil { + var rm binding.Message + if resp != nil { + rm = (*binding.EventMessage)(resp) + } + + return respFn(ctx, rm, result) // TODO: there is a chance this never gets called. Is that ok? + } + } + + return nil +} + +func (r *receiveInvoker) IsReceiver() bool { + return !r.fn.hasEventOut +} + +func (r *receiveInvoker) IsResponder() bool { + return r.fn.hasEventOut +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go new file mode 100644 index 0000000000..8e8add28e7 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/observability.go @@ -0,0 +1,94 @@ +package client + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/extensions" + "github.com/cloudevents/sdk-go/v2/observability" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" +) + +var ( + // LatencyMs measures the latency in milliseconds for the CloudEvents + // client methods. + LatencyMs = stats.Float64("cloudevents.io/sdk-go/client/latency", "The latency in milliseconds for the CloudEvents client methods.", "ms") +) + +var ( + // LatencyView is an OpenCensus view that shows client method latency. 
+ LatencyView = &view.View{ + Name: "client/latency", + Measure: LatencyMs, + Description: "The distribution of latency inside of client for CloudEvents.", + Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), + TagKeys: observability.LatencyTags(), + } +) + +type observed int32 + +// Adheres to Observable +var _ observability.Observable = observed(0) + +const ( + specversionAttr = "cloudevents.specversion" + typeAttr = "cloudevents.type" + sourceAttr = "cloudevents.source" + subjectAttr = "cloudevents.subject" + datacontenttypeAttr = "cloudevents.datacontenttype" + + reportSend observed = iota + reportRequest + reportStartReceiver +) + +// MethodName implements Observable.MethodName +func (o observed) MethodName() string { + switch o { + case reportSend: + return "send" + case reportRequest: + return "request" + case reportStartReceiver: + return "start_receiver" + default: + return "unknown" + } +} + +// LatencyMs implements Observable.LatencyMs +func (o observed) LatencyMs() *stats.Float64Measure { + return LatencyMs +} + +func EventTraceAttributes(e event.EventReader) []trace.Attribute { + as := []trace.Attribute{ + trace.StringAttribute(specversionAttr, e.SpecVersion()), + trace.StringAttribute(typeAttr, e.Type()), + trace.StringAttribute(sourceAttr, e.Source()), + } + if sub := e.Subject(); sub != "" { + as = append(as, trace.StringAttribute(subjectAttr, sub)) + } + if dct := e.DataContentType(); dct != "" { + as = append(as, trace.StringAttribute(datacontenttypeAttr, dct)) + } + return as +} + +// TraceSpan returns context and trace.Span based on event. Caller must call span.End() +func TraceSpan(ctx context.Context, e event.Event) (context.Context, *trace.Span) { + var span *trace.Span + if ext, ok := extensions.GetDistributedTracingExtension(e); ok { + ctx, span = ext.StartChildSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindServer)) + } + if span == nil { + ctx, span = trace.StartSpan(ctx, observability.ClientSpanName, trace.WithSpanKind(trace.SpanKindServer)) + } + if span.IsRecordingEvents() { + span.AddAttributes(EventTraceAttributes(&e)...) + } + return ctx, span +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/options.go b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go new file mode 100644 index 0000000000..3a1b40fe9e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/options.go @@ -0,0 +1,73 @@ +package client + +import ( + "fmt" + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Option is the function signature required to be considered an client.Option. +type Option func(interface{}) error + +// WithEventDefaulter adds an event defaulter to the end of the defaulter chain. 
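The latency measurements recorded by the reporters above only surface if `LatencyView` is registered with OpenCensus. A short sketch, assuming the standard `view.Register` call and leaving exporter setup out:

```go
package main

import (
	"log"

	"github.com/cloudevents/sdk-go/v2/client"
	"go.opencensus.io/stats/view"
)

func main() {
	// Register the client latency view so data recorded against LatencyMs
	// is aggregated; an OpenCensus exporter would normally be set up too.
	if err := view.Register(client.LatencyView); err != nil {
		log.Fatalf("failed to register client latency view: %v", err)
	}
}
```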
+func WithEventDefaulter(fn EventDefaulter) Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + if fn == nil { + return fmt.Errorf("client option was given an nil event defaulter") + } + c.eventDefaulterFns = append(c.eventDefaulterFns, fn) + } + return nil + } +} + +func WithForceBinary() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceBinary) + } + return nil + } +} + +func WithForceStructured() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.outboundContextDecorators = append(c.outboundContextDecorators, binding.WithForceStructured) + } + return nil + } +} + +// WithUUIDs adds DefaultIDToUUIDIfNotSet event defaulter to the end of the +// defaulter chain. +func WithUUIDs() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultIDToUUIDIfNotSet) + } + return nil + } +} + +// WithTimeNow adds DefaultTimeToNowIfNotSet event defaulter to the end of the +// defaulter chain. +func WithTimeNow() Option { + return func(i interface{}) error { + if c, ok := i.(*ceClient); ok { + c.eventDefaulterFns = append(c.eventDefaulterFns, DefaultTimeToNowIfNotSet) + } + return nil + } +} + +// WithTracePropagation enables trace propagation via the distributed tracing +// extension. +func WithTracePropagation() Option { + return func(i interface{}) error { + if c, ok := i.(*obsClient); ok { + c.addTracing = true + } + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go new file mode 100644 index 0000000000..e75f9abb98 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/client/receiver.go @@ -0,0 +1,189 @@ +package client + +import ( + "context" + "errors" + "fmt" + "reflect" + + "github.com/cloudevents/sdk-go/v2/event" + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// Receive is the signature of a fn to be invoked for incoming cloudevents. +type ReceiveFull func(context.Context, event.Event) protocol.Result + +type receiverFn struct { + numIn int + numOut int + fnValue reflect.Value + + hasContextIn bool + hasEventIn bool + + hasEventOut bool + hasResultOut bool +} + +const ( + inParamUsage = "expected a function taking either no parameters, one or more of (context.Context, event.Event) ordered" + outParamUsage = "expected a function returning one or mode of (*event.Event, protocol.Result) ordered" +) + +var ( + contextType = reflect.TypeOf((*context.Context)(nil)).Elem() + eventType = reflect.TypeOf((*event.Event)(nil)).Elem() + eventPtrType = reflect.TypeOf((*event.Event)(nil)) // want the ptr type + resultType = reflect.TypeOf((*protocol.Result)(nil)).Elem() +) + +// receiver creates a receiverFn wrapper class that is used by the client to +// validate and invoke the provided function. 
+// Valid fn signatures are: +// * func() +// * func() error +// * func(context.Context) +// * func(context.Context) transport.Result +// * func(event.Event) +// * func(event.Event) transport.Result +// * func(context.Context, event.Event) +// * func(context.Context, event.Event) transport.Result +// * func(event.Event) *event.Event +// * func(event.Event) (*event.Event, transport.Result) +// * func(context.Context, event.Event, *event.Event +// * func(context.Context, event.Event) (*event.Event, transport.Result) +// +func receiver(fn interface{}) (*receiverFn, error) { + fnType := reflect.TypeOf(fn) + if fnType.Kind() != reflect.Func { + return nil, errors.New("must pass a function to handle events") + } + + r := &receiverFn{ + fnValue: reflect.ValueOf(fn), + numIn: fnType.NumIn(), + numOut: fnType.NumOut(), + } + + if err := r.validate(fnType); err != nil { + return nil, err + } + + return r, nil +} + +func (r *receiverFn) invoke(ctx context.Context, e event.Event) (*event.Event, protocol.Result) { + args := make([]reflect.Value, 0, r.numIn) + + if r.numIn > 0 { + if r.hasContextIn { + args = append(args, reflect.ValueOf(ctx)) + } + if r.hasEventIn { + args = append(args, reflect.ValueOf(e)) + } + } + v := r.fnValue.Call(args) + var respOut protocol.Result + var eOut *event.Event + if r.numOut > 0 { + i := 0 + if r.hasEventOut { + if eo, ok := v[i].Interface().(*event.Event); ok { + eOut = eo + } + i++ // <-- note, need to inc i. + } + if r.hasResultOut { + if resp, ok := v[i].Interface().(protocol.Result); ok { + respOut = resp + } + } + } + return eOut, respOut +} + +// Verifies that the inputs to a function have a valid signature +// Valid input is to be [0, all] of +// context.Context, event.Event in this order. +func (r *receiverFn) validateInParamSignature(fnType reflect.Type) error { + r.hasContextIn = false + r.hasEventIn = false + + switch fnType.NumIn() { + case 2: + // has to be (context.Context, event.Event) + if !fnType.In(1).ConvertibleTo(eventType) { + return fmt.Errorf("%s; cannot convert parameter 2 from %s to event.Event", inParamUsage, fnType.In(1)) + } else { + r.hasEventIn = true + } + fallthrough + case 1: + if !fnType.In(0).ConvertibleTo(contextType) { + if !fnType.In(0).ConvertibleTo(eventType) { + return fmt.Errorf("%s; cannot convert parameter 1 from %s to context.Context or event.Event", inParamUsage, fnType.In(0)) + } else if r.hasEventIn { + return fmt.Errorf("%s; duplicate parameter of type event.Event", inParamUsage) + } else { + r.hasEventIn = true + } + } else { + r.hasContextIn = true + } + fallthrough + case 0: + return nil + + default: + return fmt.Errorf("%s; function has too many parameters (%d)", inParamUsage, fnType.NumIn()) + } +} + +// Verifies that the outputs of a function have a valid signature +// Valid output signatures to be [0, all] of +// *event.Event, transport.Result in this order +func (r *receiverFn) validateOutParamSignature(fnType reflect.Type) error { + r.hasEventOut = false + r.hasResultOut = false + + switch fnType.NumOut() { + case 2: + // has to be (*event.Event, transport.Result) + if !fnType.Out(1).ConvertibleTo(resultType) { + return fmt.Errorf("%s; cannot convert parameter 2 from %s to event.Response", outParamUsage, fnType.Out(1)) + } else { + r.hasResultOut = true + } + fallthrough + case 1: + if !fnType.Out(0).ConvertibleTo(resultType) { + if !fnType.Out(0).ConvertibleTo(eventPtrType) { + return fmt.Errorf("%s; cannot convert parameter 1 from %s to *event.Event or transport.Result", outParamUsage, fnType.Out(0)) + } 
else { + r.hasEventOut = true + } + } else if r.hasResultOut { + return fmt.Errorf("%s; duplicate parameter of type event.Response", outParamUsage) + } else { + r.hasResultOut = true + } + fallthrough + case 0: + return nil + default: + return fmt.Errorf("%s; function has too many return types (%d)", outParamUsage, fnType.NumOut()) + } +} + +// validateReceiverFn validates that a function has the right number of in and +// out params and that they are of allowed types. +func (r *receiverFn) validate(fnType reflect.Type) error { + if err := r.validateInParamSignature(fnType); err != nil { + return err + } + if err := r.validateOutParamSignature(fnType); err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/context.go b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go similarity index 62% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/context.go rename to vendor/github.com/cloudevents/sdk-go/v2/context/context.go index e580360f13..0cf24f496c 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/context.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/context/context.go @@ -3,7 +3,6 @@ package context import ( "context" "net/url" - "strings" ) // Opaque key type used to store target @@ -51,26 +50,3 @@ func TopicFrom(ctx context.Context) string { } return "" } - -// Opaque key type used to store encoding -type encodingKeyType struct{} - -var encodingKey = encodingKeyType{} - -// WithEncoding returns back a new context with the given encoding. Encoding is intended to be transport dependent. -// For http transport, `encoding` should be one of [binary, structured] and will be used to override the outbound -// codec encoding setting. If the transport does not understand the encoding, it will be ignored. -func WithEncoding(ctx context.Context, encoding string) context.Context { - return context.WithValue(ctx, encodingKey, strings.ToLower(encoding)) -} - -// EncodingFrom looks in the given context and returns `target` as a parsed url if found and valid, otherwise nil. 
-func EncodingFrom(ctx context.Context) string { - c := ctx.Value(encodingKey) - if c != nil { - if s, ok := c.(string); ok && s != "" { - return s - } - } - return "" -} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/context/doc.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/context/doc.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go b/vendor/github.com/cloudevents/sdk-go/v2/context/logger.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/context/logger.go rename to vendor/github.com/cloudevents/sdk-go/v2/context/logger.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go similarity index 84% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go index e4e0e17f2b..591878e5dc 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/content_type.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/content_type.go @@ -1,6 +1,7 @@ -package cloudevents +package event const ( + TextPlain = "text/plain" TextJSON = "text/json" ApplicationJSON = "application/json" ApplicationXML = "application/xml" @@ -20,6 +21,12 @@ func StringOfApplicationXML() *string { return &a } +// StringOfTextPlain returns a string pointer to "text/plain" +func StringOfTextPlain() *string { + a := TextPlain + return &a +} + // StringOfApplicationCloudEventsJSON returns a string pointer to // "application/cloudevents+json" func StringOfApplicationCloudEventsJSON() *string { diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go similarity index 87% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go index 180102ee3f..24c4094fc3 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/data_content_encoding.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/data_content_encoding.go @@ -1,4 +1,4 @@ -package cloudevents +package event const ( Base64 = "base64" diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go similarity index 67% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go index b9674889c6..fd68ca5598 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/codec.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec.go @@ -4,16 +4,15 @@ import ( "context" "fmt" - "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json" - "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/text" - "github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml" - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + 
"github.com/cloudevents/sdk-go/v2/event/datacodec/json" + "github.com/cloudevents/sdk-go/v2/event/datacodec/text" + "github.com/cloudevents/sdk-go/v2/event/datacodec/xml" ) -// Decoder is the expected function signature for decoding `in` to `out`. What -// `in` is could be decoder dependent. For example, `in` could be bytes, or a -// base64 string. -type Decoder func(ctx context.Context, in, out interface{}) error +// Decoder is the expected function signature for decoding `in` to `out`. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +type Decoder func(ctx context.Context, in []byte, out interface{}) error // Encoder is the expected function signature for encoding `in` to bytes. // Returns an error if the encoder has an issue encoding `in`. @@ -56,18 +55,7 @@ func AddEncoder(contentType string, fn Encoder) { // Decode looks up and invokes the decoder registered for the given content // type. An error is returned if no decoder is registered for the given // content type. -func Decode(ctx context.Context, contentType string, in, out interface{}) error { - _, r := observability.NewReporter(ctx, reportDecode) - err := obsDecode(ctx, contentType, in, out) - if err != nil { - r.Error() - } else { - r.OK() - } - return err -} - -func obsDecode(ctx context.Context, contentType string, in, out interface{}) error { +func Decode(ctx context.Context, contentType string, in []byte, out interface{}) error { if fn, ok := decoder[contentType]; ok { return fn(ctx, in, out) } @@ -78,17 +66,6 @@ func obsDecode(ctx context.Context, contentType string, in, out interface{}) err // type. An error is returned if no encoder is registered for the given // content type. func Encode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { - _, r := observability.NewReporter(ctx, reportEncode) - b, err := obsEncode(ctx, contentType, in) - if err != nil { - r.Error() - } else { - r.OK() - } - return b, err -} - -func obsEncode(ctx context.Context, contentType string, in interface{}) ([]byte, error) { if fn, ok := encoder[contentType]; ok { return fn(ctx, in) } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec_observed.go new file mode 100644 index 0000000000..b14e6f8b66 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/codec_observed.go @@ -0,0 +1,50 @@ +package datacodec + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/event/datacodec/json" + "github.com/cloudevents/sdk-go/v2/event/datacodec/text" + "github.com/cloudevents/sdk-go/v2/event/datacodec/xml" + "github.com/cloudevents/sdk-go/v2/observability" +) + +func SetObservedCodecs() { + AddDecoder("", json.DecodeObserved) + AddDecoder("application/json", json.DecodeObserved) + AddDecoder("text/json", json.DecodeObserved) + AddDecoder("application/xml", xml.DecodeObserved) + AddDecoder("text/xml", xml.DecodeObserved) + AddDecoder("text/plain", text.DecodeObserved) + + AddEncoder("", json.Encode) + AddEncoder("application/json", json.EncodeObserved) + AddEncoder("text/json", json.EncodeObserved) + AddEncoder("application/xml", xml.EncodeObserved) + AddEncoder("text/xml", xml.EncodeObserved) + AddEncoder("text/plain", text.EncodeObserved) +} + +// DecodeObserved calls Decode and records the result. 
+func DecodeObserved(ctx context.Context, contentType string, in []byte, out interface{}) error { + _, r := observability.NewReporter(ctx, reportDecode) + err := Decode(ctx, contentType, in, out) + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} + +// EncodeObserved calls Encode and records the result. +func EncodeObserved(ctx context.Context, contentType string, in interface{}) ([]byte, error) { + _, r := observability.NewReporter(ctx, reportEncode) + b, err := Encode(ctx, contentType, in) + if err != nil { + r.Error() + } else { + r.OK() + } + return b, err +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/doc.go diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go new file mode 100644 index 0000000000..f40869b34b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data.go @@ -0,0 +1,51 @@ +package json + +import ( + "context" + "encoding/json" + "fmt" + "reflect" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + if out == nil { + return fmt.Errorf("out is nil") + } + + if err := json.Unmarshal(in, out); err != nil { + return fmt.Errorf("[json] found bytes \"%s\", but failed to unmarshal: %s", string(in), err.Error()) + } + return nil +} + +// Encode attempts to json.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or json.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if in == nil { + return nil, nil + } + + it := reflect.TypeOf(in) + switch it.Kind() { + case reflect.Slice: + if it.Elem().Kind() == reflect.Uint8 { + + if b, ok := in.([]byte); ok && len(b) > 0 { + // check to see if it is a pre-encoded byte string. + if b[0] == byte('"') || b[0] == byte('{') || b[0] == byte('[') { + return b, nil + } + } + + } + } + + return json.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data_observed.go new file mode 100644 index 0000000000..21308ce868 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/data_observed.go @@ -0,0 +1,30 @@ +package json + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/observability" +) + +// DecodeObserved calls Decode and records the results. +func DecodeObserved(ctx context.Context, in []byte, out interface{}) error { + _, r := observability.NewReporter(ctx, reportDecode) + err := Decode(ctx, in, out) + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} + +// EncodeObserved calls Encode and records the results. 
+func EncodeObserved(ctx context.Context, in interface{}) ([]byte, error) { + _, r := observability.NewReporter(ctx, reportEncode) + b, err := Encode(ctx, in) + if err != nil { + r.Error() + } else { + r.OK() + } + return b, err +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/doc.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/observability.go similarity index 79% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/observability.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/observability.go index d38a4b7d25..7ff7965904 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/json/observability.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/json/observability.go @@ -1,7 +1,7 @@ package json import ( - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/v2/observability" "go.opencensus.io/stats" "go.opencensus.io/stats/view" ) @@ -33,18 +33,6 @@ const ( reportDecode ) -// TraceName implements Observable.TraceName -func (o observed) TraceName() string { - switch o { - case reportEncode: - return "datacodec/json/encode" - case reportDecode: - return "datacodec/json/decode" - default: - return "datacodec/json/unknown" - } -} - // MethodName implements Observable.MethodName func (o observed) MethodName() string { switch o { diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/observability.go similarity index 80% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/observability.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/observability.go index a51e05eb9f..870ec5dfe8 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/observability.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/observability.go @@ -1,7 +1,7 @@ package datacodec import ( - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/v2/observability" "go.opencensus.io/stats" "go.opencensus.io/stats/view" ) @@ -33,18 +33,6 @@ const ( reportDecode ) -// TraceName implements Observable.TraceName -func (o observed) TraceName() string { - switch o { - case reportEncode: - return "datacodec/encode" - case reportDecode: - return "datacodec/decode" - default: - return "datacodec/unknown" - } -} - // MethodName implements Observable.MethodName func (o observed) MethodName() string { switch o { diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/text/text.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go similarity index 60% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/text/text.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go index 3c37c5b13d..933c9b3d7d 100644 --- 
a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/text/text.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data.go @@ -6,21 +6,12 @@ import ( "fmt" ) -func Decode(_ context.Context, in, out interface{}) error { +func Decode(_ context.Context, in []byte, out interface{}) error { p, _ := out.(*string) if p == nil { return fmt.Errorf("text.Decode out: want *string, got %T", out) } - switch s := in.(type) { - case string: - *p = s - case []byte: - *p = string(s) - case nil: // treat nil like []byte{} - *p = "" - default: - return fmt.Errorf("text.Decode in: want []byte or string, got %T", in) - } + *p = string(in) return nil } diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data_observed.go new file mode 100644 index 0000000000..2897ea6b2f --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/data_observed.go @@ -0,0 +1,30 @@ +package text + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/observability" +) + +// DecodeObserved calls Decode and records the results. +func DecodeObserved(ctx context.Context, in []byte, out interface{}) error { + _, r := observability.NewReporter(ctx, reportDecode) + err := Decode(ctx, in, out) + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} + +// EncodeObserved calls Encode and records the results. +func EncodeObserved(ctx context.Context, in interface{}) ([]byte, error) { + _, r := observability.NewReporter(ctx, reportEncode) + b, err := Encode(ctx, in) + if err != nil { + r.Error() + } else { + r.OK() + } + return b, err +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go new file mode 100644 index 0000000000..13316702ec --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/doc.go @@ -0,0 +1,4 @@ +/* +Package text holds the encoder/decoder implementation for `text/plain`. +*/ +package text diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/observability.go new file mode 100644 index 0000000000..ede85a2adb --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/text/observability.go @@ -0,0 +1,51 @@ +package text + +import ( + "github.com/cloudevents/sdk-go/v2/observability" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" +) + +var ( + // LatencyMs measures the latency in milliseconds for the CloudEvents xml data + // codec methods. + LatencyMs = stats.Float64("cloudevents.io/sdk-go/datacodec/text/latency", "The latency in milliseconds for the CloudEvents text data codec methods.", "ms") +) + +var ( + // LatencyView is an OpenCensus view that shows data codec xml method latency. 
+ LatencyView = &view.View{ + Name: "datacodec/text/latency", + Measure: LatencyMs, + Description: "The distribution of latency inside of the text data codec for CloudEvents.", + Aggregation: view.Distribution(0, .01, .1, 1, 10, 100, 1000, 10000), + TagKeys: observability.LatencyTags(), + } +) + +type observed int32 + +// Adheres to Observable +var _ observability.Observable = observed(0) + +const ( + reportEncode observed = iota + reportDecode +) + +// MethodName implements Observable.MethodName +func (o observed) MethodName() string { + switch o { + case reportEncode: + return "encode" + case reportDecode: + return "decode" + default: + return "unknown" + } +} + +// LatencyMs implements Observable.LatencyMs +func (o observed) LatencyMs() *stats.Float64Measure { + return LatencyMs +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go new file mode 100644 index 0000000000..13045e03d6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data.go @@ -0,0 +1,35 @@ +package xml + +import ( + "context" + "encoding/xml" + "fmt" +) + +// Decode takes `in` as []byte. +// If Event sent the payload as base64, Decoder assumes that `in` is the +// decoded base64 byte array. +func Decode(ctx context.Context, in []byte, out interface{}) error { + if in == nil { + return nil + } + + if err := xml.Unmarshal(in, out); err != nil { + return fmt.Errorf("[xml] found bytes, but failed to unmarshal: %s %s", err.Error(), string(in)) + } + return nil +} + +// Encode attempts to xml.Marshal `in` into bytes. Encode will inspect `in` +// and returns `in` unmodified if it is detected that `in` is already a []byte; +// Or xml.Marshal errors. +func Encode(ctx context.Context, in interface{}) ([]byte, error) { + if b, ok := in.([]byte); ok { + // check to see if it is a pre-encoded byte string. + if len(b) > 0 && b[0] == byte('"') { + return b, nil + } + } + + return xml.Marshal(in) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data_observed.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data_observed.go new file mode 100644 index 0000000000..14f6c2824b --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/data_observed.go @@ -0,0 +1,30 @@ +package xml + +import ( + "context" + "github.com/cloudevents/sdk-go/v2/observability" +) + +// DecodeObserved calls Decode and records the result. +func DecodeObserved(ctx context.Context, in []byte, out interface{}) error { + _, r := observability.NewReporter(ctx, reportDecode) + err := Decode(ctx, in, out) + if err != nil { + r.Error() + } else { + r.OK() + } + return err +} + +// EncodeObserved calls Encode and records the result. 
+func EncodeObserved(ctx context.Context, in interface{}) ([]byte, error) { + _, r := observability.NewReporter(ctx, reportEncode) + b, err := Encode(ctx, in) + if err != nil { + r.Error() + } else { + r.OK() + } + return b, err +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/doc.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/observability.go similarity index 79% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/observability.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/observability.go index 31b0bb2699..b0f4c935d0 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/datacodec/xml/observability.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/datacodec/xml/observability.go @@ -1,7 +1,7 @@ package xml import ( - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/v2/observability" "go.opencensus.io/stats" "go.opencensus.io/stats/view" ) @@ -33,18 +33,6 @@ const ( reportDecode ) -// TraceName implements Observable.TraceName -func (o observed) TraceName() string { - switch o { - case reportEncode: - return "datacodec/xml/encode" - case reportDecode: - return "datacodec/xml/decode" - default: - return "datacodec/xml/unknown" - } -} - // MethodName implements Observable.MethodName func (o observed) MethodName() string { switch o { diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go similarity index 86% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/doc.go index cc2201da91..b389d1e4ef 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/doc.go @@ -1,4 +1,4 @@ /* Package cloudevents provides primitives to work with CloudEvents specification: https://github.com/cloudevents/spec. */ -package cloudevents +package event diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go new file mode 100644 index 0000000000..78963856e2 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event.go @@ -0,0 +1,158 @@ +package event + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +// Event represents the canonical representation of a CloudEvent. +type Event struct { + Context EventContext + DataEncoded []byte + // DataBase64 indicates if the event, when serialized, represents + // the data field using the base64 encoding. 
+ // In v0.3, this field is superseded by DataContentEncoding + DataBase64 bool + FieldErrors map[string]error +} + +const ( + defaultEventVersion = CloudEventsVersionV1 +) + +func (e *Event) fieldError(field string, err error) { + if e.FieldErrors == nil { + e.FieldErrors = make(map[string]error, 0) + } + e.FieldErrors[field] = err +} + +func (e *Event) fieldOK(field string) { + if e.FieldErrors != nil { + delete(e.FieldErrors, field) + } +} + +// New returns a new Event, an optional version can be passed to change the +// default spec version from 1.0 to the provided version. +func New(version ...string) Event { + specVersion := defaultEventVersion + if len(version) >= 1 { + specVersion = version[0] + } + e := &Event{} + e.SetSpecVersion(specVersion) + return *e +} + +// DEPRECATED: Access extensions directly via the e.Extensions() map. +// Use functions in the types package to convert extension values. +// For example replace this: +// +// var i int +// err := e.ExtensionAs("foo", &i) +// +// With this: +// +// i, err := types.ToInteger(e.Extensions["foo"]) +// +func (e Event) ExtensionAs(name string, obj interface{}) error { + return e.Context.ExtensionAs(name, obj) +} + +// Validate performs a spec based validation on this event. +// Validation is dependent on the spec version specified in the event context. +func (e Event) Validate() error { + if e.Context == nil { + return fmt.Errorf("every event conforming to the CloudEvents specification MUST include a context") + } + + if e.FieldErrors != nil { + errs := make([]string, 0) + for f, e := range e.FieldErrors { + errs = append(errs, fmt.Sprintf("%q: %s,", f, e)) + } + if len(errs) > 0 { + return fmt.Errorf("previous field errors: [%s]", strings.Join(errs, "\n")) + } + } + + if err := e.Context.Validate(); err != nil { + return err + } + + return nil +} + +// String returns a pretty-printed representation of the Event. 
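// Editor's note: an illustrative sketch, not part of the vendored diff. In v2 the Event
// writer methods no longer panic on bad input (see event_writer.go later in this diff);
// failures are cached in FieldErrors and surfaced by Validate(), as defined above.
// The canonical module path github.com/cloudevents/sdk-go/v2 is assumed.
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/v2/event"
)

func main() {
	e := event.New() // defaults to spec version 1.0

	// An unsupported spec version is recorded as a field error instead of panicking.
	e.SetSpecVersion("0.2")

	e.SetID("abc-123")
	e.SetSource("example/source")
	e.SetType("com.example.object.created")

	// Validate reports the cached "specversion" field error.
	if err := e.Validate(); err != nil {
		fmt.Println(err)
	}
}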
+func (e Event) String() string { + b := strings.Builder{} + + b.WriteString("Validation: ") + + valid := e.Validate() + if valid == nil { + b.WriteString("valid\n") + } else { + b.WriteString("invalid\n") + } + if valid != nil { + b.WriteString(fmt.Sprintf("Validation Error: \n%s\n", valid.Error())) + } + + b.WriteString(e.Context.String()) + + if e.DataEncoded != nil { + if e.DataBase64 { + b.WriteString("Data (binary),\n ") + } else { + b.WriteString("Data,\n ") + } + switch e.DataMediaType() { + case ApplicationJSON: + var prettyJSON bytes.Buffer + err := json.Indent(&prettyJSON, e.DataEncoded, " ", " ") + if err != nil { + b.Write(e.DataEncoded) + } else { + b.Write(prettyJSON.Bytes()) + } + default: + b.Write(e.DataEncoded) + } + b.WriteString("\n") + } + + return b.String() +} + +func (e Event) Clone() Event { + out := Event{} + out.Context = e.Context.Clone() + out.DataEncoded = cloneBytes(e.DataEncoded) + out.DataBase64 = e.DataBase64 + out.FieldErrors = e.cloneFieldErrors() + return out +} + +func cloneBytes(in []byte) []byte { + if in == nil { + return nil + } + out := make([]byte, len(in)) + copy(out, in) + return out +} + +func (e Event) cloneFieldErrors() map[string]error { + if e.FieldErrors == nil { + return nil + } + newFE := make(map[string]error, len(e.FieldErrors)) + for k, v := range e.FieldErrors { + newFE[k] = v + } + return newFE +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go new file mode 100644 index 0000000000..c85fe7e52c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_data.go @@ -0,0 +1,113 @@ +package event + +import ( + "context" + "encoding/base64" + "fmt" + "strconv" + + "github.com/cloudevents/sdk-go/v2/event/datacodec" +) + +// SetData encodes the given payload with the given content type. +// If the provided payload is a byte array, when marshalled to json it will be encoded as base64. +// If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a +// marshalling to byte array. +func (e *Event) SetData(contentType string, obj interface{}) error { + e.SetDataContentType(contentType) + + if e.SpecVersion() != CloudEventsVersionV1 { + return e.legacySetData(obj) + } + + // Version 1.0 and above. + switch obj := obj.(type) { + case []byte: + e.DataEncoded = obj + e.DataBase64 = true + default: + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + + return nil +} + +// Deprecated: Delete when we do not have to support Spec v0.3. +func (e *Event) legacySetData(obj interface{}) error { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + if e.DeprecatedDataContentEncoding() == Base64 { + buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) + base64.StdEncoding.Encode(buf, data) + e.DataEncoded = buf + e.DataBase64 = false + } else { + data, err := datacodec.Encode(context.Background(), e.DataMediaType(), obj) + if err != nil { + return err + } + e.DataEncoded = data + e.DataBase64 = false + } + return nil +} + +const ( + quotes = `"'` +) + +func (e Event) Data() []byte { + return e.DataEncoded +} + +// DataAs attempts to populate the provided data object with the event payload. +// data should be a pointer type. 
+func (e Event) DataAs(obj interface{}) error { + data := e.Data() + + if len(data) == 0 { + // No data. + return nil + } + + if e.SpecVersion() != CloudEventsVersionV1 { + var err error + if data, err = e.legacyConvertData(data); err != nil { + return err + } + } + + return datacodec.Decode(context.Background(), e.DataMediaType(), data, obj) +} + +func (e Event) legacyConvertData(data []byte) ([]byte, error) { + if e.Context.DeprecatedGetDataContentEncoding() == Base64 { + var bs []byte + // test to see if we need to unquote the data. + if data[0] == quotes[0] || data[0] == quotes[1] { + str, err := strconv.Unquote(string(data)) + if err != nil { + return nil, err + } + bs = []byte(str) + } else { + bs = data + } + + buf := make([]byte, base64.StdEncoding.DecodedLen(len(bs))) + n, err := base64.StdEncoding.Decode(buf, bs) + if err != nil { + return nil, fmt.Errorf("failed to decode data from base64: %s", err.Error()) + } + data = buf[:n] + } + + return data, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go similarity index 70% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go index 1249f25dd9..a21fa9e39f 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_interface.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_interface.go @@ -1,4 +1,4 @@ -package cloudevents +package event import ( "time" @@ -33,19 +33,35 @@ type EventReader interface { // Extensions use the CloudEvents type system, details in package cloudevents/types. Extensions() map[string]interface{} - // DEPRECATED: see event.Context.ExtensionAs // ExtensionAs returns event.Context.ExtensionAs(name, obj). + // + // DEPRECATED: Access extensions directly via the e.Extensions() map. + // Use functions in the types package to convert extension values. + // For example replace this: + // + // var i int + // err := e.ExtensionAs("foo", &i) + // + // With this: + // + // i, err := types.ToInteger(e.Extensions["foo"]) + // ExtensionAs(string, interface{}) error // Data Attribute + // Data returns the raw data buffer + // If the event was encoded with base64 encoding, Data returns the already decoded + // byte array + Data() []byte + // DataAs attempts to populate the provided data object with the event payload. - // data should be a pointer type. DataAs(interface{}) error } // EventWriter is the interface for writing through an event onto attributes. -// If an error is thrown by a sub-component, EventWriter panics. +// If an error is thrown by a sub-component, EventWriter caches the error +// internally and exposes errors with a call to event.Validate(). type EventWriter interface { // Context Attributes @@ -73,6 +89,9 @@ type EventWriter interface { // SetExtension performs event.Context.SetExtension. SetExtension(string, interface{}) - // SetData encodes the given payload with the current encoding settings. - SetData(interface{}) error + // SetData encodes the given payload with the given content type. + // If the provided payload is a byte array, when marshalled to json it will be encoded as base64. + // If the provided payload is different from byte array, datacodec.Encode is invoked to attempt a + // marshalling to byte array. 
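// Editor's note: an illustrative sketch, not part of the vendored diff. It shows the new
// v2 data flow defined above: SetData now takes the content type explicitly, and DataAs
// decodes through the datacodec registry. The canonical module path
// github.com/cloudevents/sdk-go/v2 is assumed; the payload type is made up for the example.
package main

import (
	"fmt"
	"log"

	"github.com/cloudevents/sdk-go/v2/event"
)

type example struct {
	Sequence int    `json:"sequence"`
	Message  string `json:"message"`
}

func main() {
	e := event.New()
	e.SetID("abc-123")
	e.SetSource("example/source")
	e.SetType("com.example.sample")

	// v2: the content type is passed to SetData; []byte payloads would be flagged for base64.
	if err := e.SetData(event.ApplicationJSON, example{Sequence: 1, Message: "hello"}); err != nil {
		log.Fatal(err)
	}

	var out example
	if err := e.DataAs(&out); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", out)
}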
+ SetData(string, interface{}) error } diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_marshal.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go similarity index 56% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_marshal.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go index f7c864f594..acca70de2b 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_marshal.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_marshal.go @@ -1,15 +1,15 @@ -package cloudevents +package event import ( "context" - "encoding/base64" "encoding/json" "errors" "fmt" - "strconv" "strings" - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + errors2 "github.com/pkg/errors" + + "github.com/cloudevents/sdk-go/v2/observability" ) // MarshalJSON implements a custom json marshal method used when this type is @@ -26,7 +26,7 @@ func (e Event) MarshalJSON() ([]byte, error) { var err error switch e.SpecVersion() { - case CloudEventsVersionV01, CloudEventsVersionV02, CloudEventsVersionV03: + case CloudEventsVersionV03: b, err = JsonEncodeLegacy(e) case CloudEventsVersionV1: b, err = JsonEncode(e) @@ -59,10 +59,6 @@ func (e *Event) UnmarshalJSON(b []byte) error { var err error switch version { - case CloudEventsVersionV01: - err = e.JsonDecodeV01(b, raw) - case CloudEventsVersionV02: - err = e.JsonDecodeV02(b, raw) case CloudEventsVersionV03: err = e.JsonDecodeV03(b, raw) case CloudEventsVersionV1: @@ -82,15 +78,6 @@ func (e *Event) UnmarshalJSON(b []byte) error { } func versionFromRawMessage(raw map[string]json.RawMessage) string { - // v0.1 - if v, ok := raw["cloudEventsVersion"]; ok { - var version string - if err := json.Unmarshal(v, &version); err != nil { - return "" - } - return version - } - // v0.2 and after if v, ok := raw["specversion"]; ok { var version string @@ -104,68 +91,51 @@ func versionFromRawMessage(raw map[string]json.RawMessage) string { // JsonEncode func JsonEncode(e Event) ([]byte, error) { - data, err := e.DataBytes() - if err != nil { - return nil, err - } - return jsonEncode(e.Context, data, e.DataBinary) + return jsonEncode(e.Context, e.DataEncoded, e.DataBase64) } // JsonEncodeLegacy func JsonEncodeLegacy(e Event) ([]byte, error) { - var data []byte isBase64 := e.Context.DeprecatedGetDataContentEncoding() == Base64 - var err error - data, err = e.DataBytes() - if err != nil { - return nil, err - } - return jsonEncode(e.Context, data, isBase64) + return jsonEncode(e.Context, e.DataEncoded, isBase64) } -func jsonEncode(ctx EventContextReader, data []byte, isBase64 bool) ([]byte, error) { +func jsonEncode(ctx EventContextReader, data []byte, shouldEncodeToBase64 bool) ([]byte, error) { var b map[string]json.RawMessage var err error - if ctx.GetSpecVersion() == CloudEventsVersionV01 { - b, err = marshalEventLegacy(ctx) - } else { - b, err = marshalEvent(ctx, ctx.GetExtensions()) - } + b, err = marshalEvent(ctx, ctx.GetExtensions()) if err != nil { return nil, err } if data != nil { - // data is passed in as an encoded []byte. That slice might be any - // number of things but for json encoding of the envelope all we care - // is if the payload is either a string or a json object. If it is a - // json object, it can be inserted into the body without modification. - // Otherwise we need to quote it if not already quoted. + // data here is a serialized version of whatever payload. 
+ // If we need to write the payload as base64, shouldEncodeToBase64 is true. mediaType, err := ctx.GetDataMediaType() if err != nil { return nil, err } isJson := mediaType == "" || mediaType == ApplicationJSON || mediaType == TextJSON - // TODO(#60): we do not support json values at the moment, only objects and lists. - if isJson && !isBase64 { + // If isJson and no encoding to base64, we don't need to perform additional steps + if isJson && !shouldEncodeToBase64 { b["data"] = data } else { - var dataKey string - if ctx.GetSpecVersion() == CloudEventsVersionV1 { + var dataKey = "data" + if ctx.GetSpecVersion() == CloudEventsVersionV1 && shouldEncodeToBase64 { dataKey = "data_base64" - buf := make([]byte, base64.StdEncoding.EncodedLen(len(data))) - base64.StdEncoding.Encode(buf, data) - data = buf - } else { - dataKey = "data" } - if data[0] != byte('"') { - b[dataKey] = []byte(strconv.QuoteToASCII(string(data))) + var dataPointer []byte + if shouldEncodeToBase64 { + dataPointer, err = json.Marshal(data) } else { - // already quoted - b[dataKey] = data + dataPointer, err = json.Marshal(string(data)) } + if err != nil { + return nil, err + } + + b[dataKey] = dataPointer } } @@ -177,65 +147,6 @@ func jsonEncode(ctx EventContextReader, data []byte, isBase64 bool) ([]byte, err return body, nil } -// JsonDecodeV01 takes in the byte representation of a version 0.1 structured json CloudEvent and returns a -// cloudevent.Event or an error if there are parsing errors. -func (e *Event) JsonDecodeV01(body []byte, raw map[string]json.RawMessage) error { - ec := EventContextV01{} - if err := json.Unmarshal(body, &ec); err != nil { - return err - } - - var data interface{} - if d, ok := raw["data"]; ok { - data = []byte(d) - } - - e.Context = &ec - e.Data = data - e.DataEncoded = data != nil - - return nil -} - -// JsonDecodeV02 takes in the byte representation of a version 0.2 structured json CloudEvent and returns a -// cloudevent.Event or an error if there are parsing errors. -func (e *Event) JsonDecodeV02(body []byte, raw map[string]json.RawMessage) error { - ec := EventContextV02{} - if err := json.Unmarshal(body, &ec); err != nil { - return err - } - - // TODO: could use reflection to get these. - delete(raw, "specversion") - delete(raw, "type") - delete(raw, "source") - delete(raw, "id") - delete(raw, "time") - delete(raw, "schemaurl") - delete(raw, "contenttype") - - var data interface{} - if d, ok := raw["data"]; ok { - data = []byte(d) - } - delete(raw, "data") - - if len(raw) > 0 { - extensions := make(map[string]interface{}, len(raw)) - for k, v := range raw { - k = strings.ToLower(k) - extensions[k] = v - } - ec.Extensions = extensions - } - - e.Context = &ec - e.Data = data - e.DataEncoded = data != nil - - return nil -} - // JsonDecodeV03 takes in the byte representation of a version 0.3 structured json CloudEvent and returns a // cloudevent.Event or an error if there are parsing errors. func (e *Event) JsonDecodeV03(body []byte, raw map[string]json.RawMessage) error { @@ -244,7 +155,6 @@ func (e *Event) JsonDecodeV03(body []byte, raw map[string]json.RawMessage) error return err } - // TODO: could use reflection to get these. 
delete(raw, "specversion") delete(raw, "type") delete(raw, "source") @@ -255,24 +165,53 @@ func (e *Event) JsonDecodeV03(body []byte, raw map[string]json.RawMessage) error delete(raw, "datacontenttype") delete(raw, "datacontentencoding") - var data interface{} + var data []byte if d, ok := raw["data"]; ok { - data = []byte(d) + data = d + + // Decode the Base64 if we have a base64 payload + if ec.DeprecatedGetDataContentEncoding() == Base64 { + var tmp []byte + if err := json.Unmarshal(d, &tmp); err != nil { + return err + } + e.DataBase64 = true + e.DataEncoded = tmp + } else { + if ec.DataContentType != nil { + ct := *ec.DataContentType + if ct != ApplicationJSON && ct != TextJSON { + var dataStr string + err := json.Unmarshal(d, &dataStr) + if err != nil { + return err + } + + data = []byte(dataStr) + } + } + e.DataEncoded = data + e.DataBase64 = false + } } delete(raw, "data") if len(raw) > 0 { extensions := make(map[string]interface{}, len(raw)) + ec.Extensions = extensions for k, v := range raw { k = strings.ToLower(k) - extensions[k] = v + var tmp interface{} + if err := json.Unmarshal(v, &tmp); err != nil { + return err + } + if err := ec.SetExtension(k, tmp); err != nil { + return errors2.Wrap(err, "Cannot set extension with key "+k) + } } - ec.Extensions = extensions } e.Context = &ec - e.Data = data - e.DataEncoded = data != nil return nil } @@ -294,9 +233,21 @@ func (e *Event) JsonDecodeV1(body []byte, raw map[string]json.RawMessage) error delete(raw, "dataschema") delete(raw, "datacontenttype") - var data interface{} + var data []byte if d, ok := raw["data"]; ok { - data = []byte(d) + data = d + if ec.DataContentType != nil { + ct := *ec.DataContentType + if ct != ApplicationJSON && ct != TextJSON { + var dataStr string + err := json.Unmarshal(d, &dataStr) + if err != nil { + return err + } + + data = []byte(dataStr) + } + } } delete(raw, "data") @@ -307,38 +258,43 @@ func (e *Event) JsonDecodeV1(body []byte, raw map[string]json.RawMessage) error return err } dataBase64 = tmp + } delete(raw, "data_base64") + if data != nil && dataBase64 != nil { + return errors.New("parsing error: JSON decoder found both 'data', and 'data_base64' in JSON payload") + } + if data != nil { + e.DataEncoded = data + e.DataBase64 = false + } else if dataBase64 != nil { + e.DataEncoded = dataBase64 + e.DataBase64 = true + } + if len(raw) > 0 { extensions := make(map[string]interface{}, len(raw)) + ec.Extensions = extensions for k, v := range raw { k = strings.ToLower(k) - var tmp string + var tmp interface{} if err := json.Unmarshal(v, &tmp); err != nil { return err } - extensions[k] = tmp + if err := ec.SetExtension(k, tmp); err != nil { + return errors2.Wrap(err, "Cannot set extension with key "+k) + } } - ec.Extensions = extensions } e.Context = &ec - if data != nil && dataBase64 != nil { - return errors.New("parsing error: JSON decoder found both 'data', and 'data_base64' in JSON payload") - } - if data != nil { - e.Data = data - } else if dataBase64 != nil { - e.Data = dataBase64 - } - e.DataEncoded = data != nil return nil } -func marshalEventLegacy(event interface{}) (map[string]json.RawMessage, error) { - b, err := json.Marshal(event) +func marshalEvent(eventCtx EventContextReader, extensions map[string]interface{}) (map[string]json.RawMessage, error) { + b, err := json.Marshal(eventCtx) if err != nil { return nil, err } @@ -348,19 +304,12 @@ func marshalEventLegacy(event interface{}) (map[string]json.RawMessage, error) { return nil, err } - return brm, nil -} - -func marshalEvent(event 
interface{}, extensions map[string]interface{}) (map[string]json.RawMessage, error) { - b, err := json.Marshal(event) + sv, err := json.Marshal(eventCtx.GetSpecVersion()) if err != nil { return nil, err } - brm := map[string]json.RawMessage{} - if err := json.Unmarshal(b, &brm); err != nil { - return nil, err - } + brm["specversion"] = sv for k, v := range extensions { k = strings.ToLower(k) diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_observability.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_observability.go similarity index 78% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_observability.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/event_observability.go index bce63f5c60..e21a845f1c 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_observability.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_observability.go @@ -1,9 +1,9 @@ -package cloudevents +package event import ( "fmt" - "github.com/cloudevents/sdk-go/pkg/cloudevents/observability" + "github.com/cloudevents/sdk-go/v2/observability" "go.opencensus.io/stats" "go.opencensus.io/stats/view" ) @@ -38,18 +38,6 @@ const ( reportUnmarshal ) -// TraceName implements Observable.TraceName -func (o observed) TraceName() string { - switch o { - case reportMarshal: - return "cloudevents/event/marshaljson" - case reportUnmarshal: - return "cloudevents/event/unmarshaljson" - default: - return "cloudevents/event/unknwown" - } -} - // MethodName implements Observable.MethodName func (o observed) MethodName() string { switch o { @@ -78,11 +66,6 @@ type eventJSONObserved struct { // Adheres to Observable var _ observability.Observable = (*eventJSONObserved)(nil) -// TraceName implements Observable.TraceName -func (c eventJSONObserved) TraceName() string { - return fmt.Sprintf("%s/%s", c.o.TraceName(), c.v) -} - // MethodName implements Observable.MethodName func (c eventJSONObserved) MethodName() string { return fmt.Sprintf("%s/%s", c.o.MethodName(), c.v) diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go similarity index 99% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go index fe49e8424d..86ca609b46 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_reader.go @@ -1,4 +1,4 @@ -package cloudevents +package event import ( "time" diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go similarity index 59% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go index 4f6f0f80ec..3d392d5601 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/event_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/event_writer.go @@ -1,4 +1,4 @@ -package cloudevents +package event import ( "fmt" @@ -9,86 +9,105 @@ var _ EventWriter = (*Event)(nil) // SetSpecVersion implements EventWriter.SetSpecVersion func (e *Event) SetSpecVersion(v string) { - if e.Context == nil { - switch v { - case CloudEventsVersionV01: - 
e.Context = EventContextV01{}.AsV01() - case CloudEventsVersionV02: - e.Context = EventContextV02{}.AsV02() - case CloudEventsVersionV03: - e.Context = EventContextV03{}.AsV03() - case CloudEventsVersionV1: - e.Context = EventContextV1{}.AsV1() - default: - panic(fmt.Errorf("a valid spec version is required: [%s, %s, %s, %s]", - CloudEventsVersionV01, CloudEventsVersionV02, CloudEventsVersionV03, CloudEventsVersionV1)) + switch v { + case CloudEventsVersionV03: + if e.Context == nil { + e.Context = &EventContextV03{} + } else { + e.Context = e.Context.AsV03() } + case CloudEventsVersionV1: + if e.Context == nil { + e.Context = &EventContextV1{} + } else { + e.Context = e.Context.AsV1() + } + default: + e.fieldError("specversion", fmt.Errorf("a valid spec version is required: [%s, %s]", + CloudEventsVersionV03, CloudEventsVersionV1)) return } - if err := e.Context.SetSpecVersion(v); err != nil { - panic(err) - } + e.fieldOK("specversion") + return } // SetType implements EventWriter.SetType func (e *Event) SetType(t string) { if err := e.Context.SetType(t); err != nil { - panic(err) + e.fieldError("type", err) + } else { + e.fieldOK("type") } } // SetSource implements EventWriter.SetSource func (e *Event) SetSource(s string) { if err := e.Context.SetSource(s); err != nil { - panic(err) + e.fieldError("source", err) + } else { + e.fieldOK("source") } } // SetSubject implements EventWriter.SetSubject func (e *Event) SetSubject(s string) { if err := e.Context.SetSubject(s); err != nil { - panic(err) + e.fieldError("subject", err) + } else { + e.fieldOK("subject") } } // SetID implements EventWriter.SetID func (e *Event) SetID(id string) { if err := e.Context.SetID(id); err != nil { - panic(err) + e.fieldError("id", err) + } else { + e.fieldOK("id") } } // SetTime implements EventWriter.SetTime func (e *Event) SetTime(t time.Time) { if err := e.Context.SetTime(t); err != nil { - panic(err) + e.fieldError("time", err) + } else { + e.fieldOK("time") } } // SetDataSchema implements EventWriter.SetDataSchema func (e *Event) SetDataSchema(s string) { if err := e.Context.SetDataSchema(s); err != nil { - panic(err) + e.fieldError("dataschema", err) + } else { + e.fieldOK("dataschema") } } // SetDataContentType implements EventWriter.SetDataContentType func (e *Event) SetDataContentType(ct string) { if err := e.Context.SetDataContentType(ct); err != nil { - panic(err) + e.fieldError("datacontenttype", err) + } else { + e.fieldOK("datacontenttype") } } // DeprecatedSetDataContentEncoding implements EventWriter.DeprecatedSetDataContentEncoding func (e *Event) SetDataContentEncoding(enc string) { if err := e.Context.DeprecatedSetDataContentEncoding(enc); err != nil { - panic(err) + e.fieldError("datacontentencoding", err) + } else { + e.fieldOK("datacontentencoding") } } // SetExtension implements EventWriter.SetExtension func (e *Event) SetExtension(name string, obj interface{}) { if err := e.Context.SetExtension(name, obj); err != nil { - panic(err) + e.fieldError("extension:"+name, err) + } else { + e.fieldOK("extension:" + name) } } diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go similarity index 88% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go index a0309e495d..5ad2374349 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext.go +++ 
b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext.go @@ -1,4 +1,4 @@ -package cloudevents +package event import "time" @@ -57,8 +57,6 @@ type EventContextReader interface { // EventContextWriter are the methods required to be a writer of context // attributes. type EventContextWriter interface { - // SetSpecVersion sets the spec version of the context. - SetSpecVersion(string) error // SetType sets the type of the context. SetType(string) error // SetSource sets the source of the context. @@ -79,6 +77,8 @@ type EventContextWriter interface { // SetExtension sets the given interface onto the extension attributes // determined by the provided name. // + // This function fails in V1 if the name doesn't respect the regex ^[a-zA-Z0-9]+$ + // // Package ./types documents the types that are allowed as extension values. SetExtension(string, interface{}) error } @@ -86,16 +86,6 @@ type EventContextWriter interface { // EventContextConverter are the methods that allow for event version // conversion. type EventContextConverter interface { - // AsV01 provides a translation from whatever the "native" encoding of the - // CloudEvent was to the equivalent in v0.1 field names, moving fields to or - // from extensions as necessary. - AsV01() *EventContextV01 - - // AsV02 provides a translation from whatever the "native" encoding of the - // CloudEvent was to the equivalent in v0.2 field names, moving fields to or - // from extensions as necessary. - AsV02() *EventContextV02 - // AsV03 provides a translation from whatever the "native" encoding of the // CloudEvent was to the equivalent in v0.3 field names, moving fields to or // from extensions as necessary. diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go similarity index 80% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go index 2e714d1876..f89c06fc4a 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03.go @@ -1,12 +1,13 @@ -package cloudevents +package event import ( "encoding/json" "fmt" + "mime" "sort" "strings" - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" + "github.com/cloudevents/sdk-go/v2/types" ) const ( @@ -17,12 +18,10 @@ const ( // EventContextV03 represents the non-data attributes of a CloudEvents v0.3 // event. type EventContextV03 struct { - // SpecVersion - The version of the CloudEvents specification used by the event. - SpecVersion string `json:"specversion"` // Type - The type of the occurrence which has happened. Type string `json:"type"` // Source - A URI describing the event producer. - Source types.URLRef `json:"source"` + Source types.URIRef `json:"source"` // Subject - The subject of the event in the context of the event producer // (identified by `source`). Subject *string `json:"subject,omitempty"` @@ -31,9 +30,8 @@ type EventContextV03 struct { // Time - A Timestamp when the event happened. Time *types.Timestamp `json:"time,omitempty"` // DataSchema - A link to the schema that the `data` attribute adheres to. - SchemaURL *types.URLRef `json:"schemaurl,omitempty"` + SchemaURL *types.URIRef `json:"schemaurl,omitempty"` // GetDataMediaType - A MIME (RFC2046) string describing the media type of `data`. 
- // TODO: Should an empty string assume `application/json`, `application/octet-stream`, or auto-detect the content? DataContentType *string `json:"datacontenttype,omitempty"` // DeprecatedDataContentEncoding describes the content encoding for the `data` attribute. Valid: nil, `Base64`. DataContentEncoding *string `json:"datacontentencoding,omitempty"` @@ -82,6 +80,10 @@ func (ec *EventContextV03) SetExtension(name string, value interface{}) error { } if value == nil { delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } + return nil } else { v, err := types.Validate(value) if err == nil { @@ -89,61 +91,42 @@ func (ec *EventContextV03) SetExtension(name string, value interface{}) error { } return err } - return nil } // Clone implements EventContextConverter.Clone func (ec EventContextV03) Clone() EventContext { - return ec.AsV03() -} - -// AsV01 implements EventContextConverter.AsV01 -func (ec EventContextV03) AsV01() *EventContextV01 { - ecv2 := ec.AsV02() - return ecv2.AsV01() -} - -// AsV02 implements EventContextConverter.AsV02 -func (ec EventContextV03) AsV02() *EventContextV02 { - ret := EventContextV02{ - SpecVersion: CloudEventsVersionV02, - ID: ec.ID, - Time: ec.Time, - Type: ec.Type, - SchemaURL: ec.SchemaURL, - ContentType: ec.DataContentType, - Source: ec.Source, - Extensions: make(map[string]interface{}), - } - // Subject was introduced in 0.3, so put it in an extension for 0.2. - if ec.Subject != nil { - _ = ret.SetExtension(SubjectKey, *ec.Subject) + ec03 := ec.AsV03() + ec03.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec03.Time = types.Clone(ec.Time).(*types.Timestamp) } - // DeprecatedDataContentEncoding was introduced in 0.3, so put it in an extension for 0.2. - if ec.DataContentEncoding != nil { - _ = ret.SetExtension(DataContentEncodingKey, *ec.DataContentEncoding) + if ec.SchemaURL != nil { + ec03.SchemaURL = types.Clone(ec.SchemaURL).(*types.URIRef) } - if ec.Extensions != nil { - for k, v := range ec.Extensions { - ret.Extensions[k] = v - } + ec03.Extensions = ec.cloneExtensions() + return ec03 +} + +func (ec *EventContextV03) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil } - if len(ret.Extensions) == 0 { - ret.Extensions = nil + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) } - return &ret + return new } // AsV03 implements EventContextConverter.AsV03 func (ec EventContextV03) AsV03() *EventContextV03 { - ec.SpecVersion = CloudEventsVersionV03 return &ec } // AsV04 implements EventContextConverter.AsV04 func (ec EventContextV03) AsV1() *EventContextV1 { ret := EventContextV1{ - SpecVersion: CloudEventsVersionV1, ID: ec.ID, Time: ec.Time, Type: ec.Type, @@ -192,16 +175,6 @@ func (ec EventContextV03) Validate() error { errors = append(errors, "type: MUST be a non-empty string") } - // specversion - // Type: String - // Constraints: - // REQUIRED - // MUST be a non-empty string - specVersion := strings.TrimSpace(ec.SpecVersion) - if specVersion == "" { - errors = append(errors, "specversion: MUST be a non-empty string") - } - // source // Type: URI-reference // Constraints: @@ -264,8 +237,12 @@ func (ec EventContextV03) Validate() error { if ec.DataContentType != nil { dataContentType := strings.TrimSpace(*ec.DataContentType) if dataContentType == "" { - // TODO: need to test for RFC 2046 errors = append(errors, "datacontenttype: if present, MUST adhere to the format specified in RFC 2046") + } else 
{ + _, _, err := mime.ParseMediaType(dataContentType) + if err != nil { + errors = append(errors, fmt.Sprintf("datacontenttype: failed to parse RFC 2046 media type, %s", err.Error())) + } } } @@ -278,7 +255,6 @@ func (ec EventContextV03) Validate() error { if ec.DataContentEncoding != nil { dataContentEncoding := strings.ToLower(strings.TrimSpace(*ec.DataContentEncoding)) if dataContentEncoding != Base64 { - // TODO: need to test for RFC 2046 errors = append(errors, "datacontentencoding: if present, MUST adhere to RFC 2045 Section 6.1") } } @@ -295,7 +271,7 @@ func (ec EventContextV03) String() string { b.WriteString("Context Attributes,\n") - b.WriteString(" specversion: " + ec.SpecVersion + "\n") + b.WriteString(" specversion: " + CloudEventsVersionV03 + "\n") b.WriteString(" type: " + ec.Type + "\n") b.WriteString(" source: " + ec.Source.String() + "\n") if ec.Subject != nil { diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go similarity index 96% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go index 2b3cc207fc..8e6eec5caa 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_reader.go @@ -1,4 +1,4 @@ -package cloudevents +package event import ( "fmt" @@ -8,9 +8,6 @@ import ( // GetSpecVersion implements EventContextReader.GetSpecVersion func (ec EventContextV03) GetSpecVersion() string { - if ec.SpecVersion != "" { - return ec.SpecVersion - } return CloudEventsVersionV03 } diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go similarity index 81% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go index 0c1bb8428e..94748c67c5 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v03_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v03_writer.go @@ -1,27 +1,17 @@ -package cloudevents +package event import ( "errors" - "fmt" "net/url" "strings" "time" - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" + "github.com/cloudevents/sdk-go/v2/types" ) // Adhere to EventContextWriter var _ EventContextWriter = (*EventContextV03)(nil) -// SetSpecVersion implements EventContextWriter.SetSpecVersion -func (ec *EventContextV03) SetSpecVersion(v string) error { - if v != CloudEventsVersionV03 { - return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV03) - } - ec.SpecVersion = CloudEventsVersionV03 - return nil -} - // SetDataContentType implements EventContextWriter.SetDataContentType func (ec *EventContextV03) SetDataContentType(ct string) error { ct = strings.TrimSpace(ct) @@ -46,7 +36,7 @@ func (ec *EventContextV03) SetSource(u string) error { if err != nil { return err } - ec.Source = types.URLRef{URL: *pu} + ec.Source = types.URIRef{URL: *pu} return nil } @@ -92,7 +82,7 @@ func (ec *EventContextV03) SetDataSchema(u string) error { if err != nil { return err } - ec.SchemaURL = &types.URLRef{URL: *pu} + ec.SchemaURL = 
&types.URIRef{URL: *pu} return nil } diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go similarity index 86% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go index c7bda117a8..d1d117983f 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1.go @@ -1,4 +1,4 @@ -package cloudevents +package event import ( "errors" @@ -7,7 +7,7 @@ import ( "sort" "strings" - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" + "github.com/cloudevents/sdk-go/v2/types" ) // WIP: AS OF SEP 20, 2019 @@ -26,9 +26,6 @@ type EventContextV1 struct { // Source - A URI describing the event producer. // +required Source types.URIRef `json:"source"` - // SpecVersion - The version of the CloudEvents specification used by the event. - // +required - SpecVersion string `json:"specversion"` // Type - The type of the occurrence which has happened. // +required Type string `json:"type"` @@ -73,8 +70,9 @@ func (ec EventContextV1) ExtensionAs(name string, obj interface{}) error { } // SetExtension adds the extension 'name' with value 'value' to the CloudEvents context. +// This function fails if the name doesn't respect the regex ^[a-zA-Z0-9]+$ func (ec *EventContextV1) SetExtension(name string, value interface{}) error { - if !IsAlphaNumericLowercaseLetters(name) { + if !IsAlphaNumeric(name) { return errors.New("bad key, CloudEvents attribute names MUST consist of lower-case letters ('a' to 'z') or digits ('0' to '9') from the ASCII character set") } @@ -84,6 +82,9 @@ func (ec *EventContextV1) SetExtension(name string, value interface{}) error { } if value == nil { delete(ec.Extensions, name) + if len(ec.Extensions) == 0 { + ec.Extensions = nil + } return nil } else { v, err := types.Validate(value) // Ensure it's a legal CE attribute value @@ -96,39 +97,46 @@ func (ec *EventContextV1) SetExtension(name string, value interface{}) error { // Clone implements EventContextConverter.Clone func (ec EventContextV1) Clone() EventContext { - return ec.AsV1() -} - -// AsV01 implements EventContextConverter.AsV01 -func (ec EventContextV1) AsV01() *EventContextV01 { - ecv2 := ec.AsV02() - return ecv2.AsV01() + ec1 := ec.AsV1() + ec1.Source = types.Clone(ec.Source).(types.URIRef) + if ec.Time != nil { + ec1.Time = types.Clone(ec.Time).(*types.Timestamp) + } + if ec.DataSchema != nil { + ec1.DataSchema = types.Clone(ec.DataSchema).(*types.URI) + } + ec1.Extensions = ec.cloneExtensions() + return ec1 } -// AsV02 implements EventContextConverter.AsV02 -func (ec EventContextV1) AsV02() *EventContextV02 { - ecv3 := ec.AsV03() - return ecv3.AsV02() +func (ec *EventContextV1) cloneExtensions() map[string]interface{} { + old := ec.Extensions + if old == nil { + return nil + } + new := make(map[string]interface{}, len(ec.Extensions)) + for k, v := range old { + new[k] = types.Clone(v) + } + return new } // AsV03 implements EventContextConverter.AsV03 func (ec EventContextV1) AsV03() *EventContextV03 { ret := EventContextV03{ - SpecVersion: CloudEventsVersionV03, ID: ec.ID, Time: ec.Time, Type: ec.Type, DataContentType: ec.DataContentType, - Source: types.URLRef{URL: ec.Source.URL}, + Source: types.URIRef{URL: ec.Source.URL}, Subject: ec.Subject, Extensions: 
make(map[string]interface{}), } if ec.DataSchema != nil { - ret.SchemaURL = &types.URLRef{URL: ec.DataSchema.URL} + ret.SchemaURL = &types.URIRef{URL: ec.DataSchema.URL} } - // TODO: DeprecatedDataContentEncoding needs to be moved to extensions. if ec.Extensions != nil { for k, v := range ec.Extensions { k = strings.ToLower(k) @@ -151,7 +159,6 @@ func (ec EventContextV1) AsV03() *EventContextV03 { // AsV04 implements EventContextConverter.AsV04 func (ec EventContextV1) AsV1() *EventContextV1 { - ec.SpecVersion = CloudEventsVersionV1 return &ec } @@ -183,16 +190,6 @@ func (ec EventContextV1) Validate() error { errors = append(errors, "source: REQUIRED") } - // specversion - // Type: String - // Constraints: - // REQUIRED - // MUST be a non-empty string - specVersion := strings.TrimSpace(ec.SpecVersion) - if specVersion == "" { - errors = append(errors, "specversion: MUST be a non-empty string") - } - // type // Type: String // Constraints: @@ -218,7 +215,7 @@ func (ec EventContextV1) Validate() error { } else { _, _, err := mime.ParseMediaType(dataContentType) if err != nil { - errors = append(errors, fmt.Sprintf("datacontenttype: failed to parse media type, %s", err.Error())) + errors = append(errors, fmt.Sprintf("datacontenttype: failed to parse RFC 2046 media type, %s", err.Error())) } } } @@ -267,7 +264,7 @@ func (ec EventContextV1) String() string { b.WriteString("Context Attributes,\n") - b.WriteString(" specversion: " + ec.SpecVersion + "\n") + b.WriteString(" specversion: " + CloudEventsVersionV1 + "\n") b.WriteString(" type: " + ec.Type + "\n") b.WriteString(" source: " + ec.Source.String() + "\n") if ec.Subject != nil { diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1_reader.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go similarity index 95% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1_reader.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go index e3f329d31b..64f1a919b3 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1_reader.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_reader.go @@ -1,4 +1,4 @@ -package cloudevents +package event import ( "fmt" @@ -8,10 +8,7 @@ import ( // GetSpecVersion implements EventContextReader.GetSpecVersion func (ec EventContextV1) GetSpecVersion() string { - if ec.SpecVersion != "" { - return ec.SpecVersion - } - return CloudEventsVersionV03 + return CloudEventsVersionV1 } // GetDataContentType implements EventContextReader.GetDataContentType @@ -80,6 +77,9 @@ func (ec EventContextV1) DeprecatedGetDataContentEncoding() string { // GetExtensions implements EventContextReader.GetExtensions func (ec EventContextV1) GetExtensions() map[string]interface{} { + if len(ec.Extensions) == 0 { + return nil + } // For now, convert the extensions of v1.0 to the pre-v1.0 style. 
ext := make(map[string]interface{}, len(ec.Extensions)) for k, v := range ec.Extensions { diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1_writer.go b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go similarity index 84% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1_writer.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go index dc33ba2f6b..1ec29e65e4 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/eventcontext_v1_writer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/eventcontext_v1_writer.go @@ -1,27 +1,17 @@ -package cloudevents +package event import ( "errors" - "fmt" "net/url" "strings" "time" - "github.com/cloudevents/sdk-go/pkg/cloudevents/types" + "github.com/cloudevents/sdk-go/v2/types" ) // Adhere to EventContextWriter var _ EventContextWriter = (*EventContextV1)(nil) -// SetSpecVersion implements EventContextWriter.SetSpecVersion -func (ec *EventContextV1) SetSpecVersion(v string) error { - if v != CloudEventsVersionV1 { - return fmt.Errorf("invalid version %q, expecting %q", v, CloudEventsVersionV1) - } - ec.SpecVersion = CloudEventsVersionV1 - return nil -} - // SetDataContentType implements EventContextWriter.SetDataContentType func (ec *EventContextV1) SetDataContentType(ct string) error { ct = strings.TrimSpace(ct) diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go similarity index 56% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go rename to vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go index c8ba54846a..4a202e5e48 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/extensions.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/event/extensions.go @@ -1,4 +1,4 @@ -package cloudevents +package event import ( "regexp" @@ -9,12 +9,6 @@ const ( // DataContentEncodingKey is the key to DeprecatedDataContentEncoding for versions that do not support data content encoding // directly. DataContentEncodingKey = "datacontentencoding" - - // EventTypeVersionKey is the key to EventTypeVersion for versions that do not support event type version directly. - EventTypeVersionKey = "eventtypeversion" - - // SubjectKey is the key to Subject for versions that do not support subject directly. 
- SubjectKey = "subject" ) func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{}, bool) { @@ -27,4 +21,4 @@ func caseInsensitiveSearch(key string, space map[string]interface{}) (interface{ return nil, false } -var IsAlphaNumericLowercaseLetters = regexp.MustCompile(`^[a-z0-9]+$`).MatchString +var IsAlphaNumeric = regexp.MustCompile(`^[a-zA-Z0-9]+$`).MatchString diff --git a/vendor/github.com/cloudevents/sdk-go/v2/extensions/distributed_tracing_extension.go b/vendor/github.com/cloudevents/sdk-go/v2/extensions/distributed_tracing_extension.go new file mode 100644 index 0000000000..9135ef341c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/extensions/distributed_tracing_extension.go @@ -0,0 +1,128 @@ +package extensions + +import ( + "context" + "reflect" + "strings" + + "github.com/cloudevents/sdk-go/v2/event" + + "github.com/cloudevents/sdk-go/v2/types" + "github.com/lightstep/tracecontext.go/traceparent" + "github.com/lightstep/tracecontext.go/tracestate" + "go.opencensus.io/trace" + octs "go.opencensus.io/trace/tracestate" +) + +const ( + TraceParentExtension = "traceparent" + TraceStateExtension = "tracestate" +) + +// DistributedTracingExtension represents the extension for cloudevents context +type DistributedTracingExtension struct { + TraceParent string `json:"traceparent"` + TraceState string `json:"tracestate"` +} + +// AddTracingAttributes adds the tracing attributes traceparent and tracestate to the cloudevents context +func (d DistributedTracingExtension) AddTracingAttributes(e event.EventWriter) { + if d.TraceParent != "" { + value := reflect.ValueOf(d) + typeOf := value.Type() + + for i := 0; i < value.NumField(); i++ { + k := strings.ToLower(typeOf.Field(i).Name) + v := value.Field(i).Interface() + if k == TraceStateExtension && v == "" { + continue + } + e.SetExtension(k, v) + } + } +} + +func GetDistributedTracingExtension(event event.Event) (DistributedTracingExtension, bool) { + if tp, ok := event.Extensions()[TraceParentExtension]; ok { + if tpStr, err := types.ToString(tp); err == nil { + var tsStr string + if ts, ok := event.Extensions()[TraceStateExtension]; ok { + tsStr, _ = types.ToString(ts) + } + return DistributedTracingExtension{TraceParent: tpStr, TraceState: tsStr}, true + } + } + return DistributedTracingExtension{}, false +} + +// FromSpanContext populates DistributedTracingExtension from a SpanContext. +func FromSpanContext(sc trace.SpanContext) DistributedTracingExtension { + tp := traceparent.TraceParent{ + TraceID: sc.TraceID, + SpanID: sc.SpanID, + Flags: traceparent.Flags{ + Recorded: sc.IsSampled(), + }, + } + + entries := make([]string, 0, len(sc.Tracestate.Entries())) + for _, entry := range sc.Tracestate.Entries() { + entries = append(entries, strings.Join([]string{entry.Key, entry.Value}, "=")) + } + + return DistributedTracingExtension{ + TraceParent: tp.String(), + TraceState: strings.Join(entries, ","), + } +} + +// ToSpanContext creates a SpanContext from a DistributedTracingExtension instance. 
+func (d DistributedTracingExtension) ToSpanContext() (trace.SpanContext, error) { + tp, err := traceparent.ParseString(d.TraceParent) + if err != nil { + return trace.SpanContext{}, err + } + sc := trace.SpanContext{ + TraceID: tp.TraceID, + SpanID: tp.SpanID, + } + if tp.Flags.Recorded { + sc.TraceOptions |= 1 + } + + if ts, err := tracestate.ParseString(d.TraceState); err == nil { + entries := make([]octs.Entry, 0, len(ts)) + for _, member := range ts { + var key string + if member.Tenant != "" { + // Due to github.com/lightstep/tracecontext.go/issues/6, + // the meaning of Vendor and Tenant are swapped here. + key = member.Vendor + "@" + member.Tenant + } else { + key = member.Vendor + } + entries = append(entries, octs.Entry{Key: key, Value: member.Value}) + } + sc.Tracestate, _ = octs.New(nil, entries...) + } + + return sc, nil +} + +func (d DistributedTracingExtension) StartChildSpan(ctx context.Context, name string, opts ...trace.StartOption) (context.Context, *trace.Span) { + if sc, err := d.ToSpanContext(); err == nil { + tSpan := trace.FromContext(ctx) + ctx, span := trace.StartSpanWithRemoteParent(ctx, name, sc, opts...) + if tSpan != nil { + // Add link to the previous in-process trace. + tsc := tSpan.SpanContext() + span.AddLink(trace.Link{ + TraceID: tsc.TraceID, + SpanID: tsc.SpanID, + Type: trace.LinkTypeParent, + }) + } + return ctx, span + } + return ctx, nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/observability/doc.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/observability/doc.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/keys.go b/vendor/github.com/cloudevents/sdk-go/v2/observability/keys.go similarity index 80% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/keys.go rename to vendor/github.com/cloudevents/sdk-go/v2/observability/keys.go index f032b10ecf..afadddcf5a 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/keys.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/observability/keys.go @@ -12,6 +12,9 @@ var ( ) const ( + // ClientSpanName is the key used to start spans from the client. + ClientSpanName = "cloudevents.client" + // ResultError is a shared result tag value for error. ResultError = "error" // ResultOK is a shared result tag value for success. diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/observer.go b/vendor/github.com/cloudevents/sdk-go/v2/observability/observer.go similarity index 64% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/observer.go rename to vendor/github.com/cloudevents/sdk-go/v2/observability/observer.go index 76e7b12fda..b27ffa9735 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/observability/observer.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/observability/observer.go @@ -7,20 +7,18 @@ import ( "go.opencensus.io/stats" "go.opencensus.io/tag" - "go.opencensus.io/trace" ) // Observable represents the the customization used by the Reporter for a given // measurement and trace for a single method. 
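// Editor's note: an illustrative sketch, not part of the vendored diff. It shows how the
// new v2 distributed tracing extension above is attached to and read back from an event.
// The traceparent value is the W3C Trace Context example value, and the canonical module
// path github.com/cloudevents/sdk-go/v2 is assumed.
package main

import (
	"fmt"

	"github.com/cloudevents/sdk-go/v2/event"
	"github.com/cloudevents/sdk-go/v2/extensions"
)

func main() {
	e := event.New()
	e.SetID("abc-123")
	e.SetSource("example/source")
	e.SetType("com.example.sample")

	dt := extensions.DistributedTracingExtension{
		TraceParent: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01",
	}
	// Sets the "traceparent" (and, when non-empty, "tracestate") extensions on the event.
	dt.AddTracingAttributes(&e)

	if got, ok := extensions.GetDistributedTracingExtension(e); ok {
		sc, err := got.ToSpanContext()
		fmt.Println(sc.TraceID, err)
	}
}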
type Observable interface { - TraceName() string MethodName() string LatencyMs() *stats.Float64Measure } -// Reporter represents a running latency counter and trace span. When Error or -// OK are called, the latency is calculated and the trace space is ended. Error -// or OK are only allowed to be called once. +// Reporter represents a running latency counter. When Error or OK are +// called, the latency is calculated. Error or OK are only allowed to +// be called once. type Reporter interface { Error() OK() @@ -28,7 +26,6 @@ type Reporter interface { type reporter struct { ctx context.Context - span *trace.Span on Observable start time.Time once sync.Once @@ -39,30 +36,14 @@ func LatencyTags() []tag.Key { return []tag.Key{KeyMethod, KeyResult} } -var ( - // Tracing is disabled by default. It is very useful for profiling an - // application. - tracingEnabled = false -) - -// EnableTracing allows control over if tracing is enabled for the sdk. -// Default is false. This applies to all of the -// `github.com/cloudevents/sdk-go/...` package. -func EnableTracing(enabled bool) { - tracingEnabled = enabled -} +// Deprecated. Tracing is always enabled. +func EnableTracing(enabled bool) {} -// NewReporter creates and returns a reporter wrapping the provided Observable, -// and injects a trace span into the context. +// NewReporter creates and returns a reporter wrapping the provided Observable. func NewReporter(ctx context.Context, on Observable) (context.Context, Reporter) { - var span *trace.Span - if tracingEnabled { - ctx, span = trace.StartSpan(ctx, on.TraceName()) - } r := &reporter{ ctx: ctx, on: on, - span: span, start: time.Now(), } r.tagMethod() @@ -80,9 +61,6 @@ func (r *reporter) tagMethod() { func (r *reporter) record() { ms := float64(time.Since(r.start) / time.Millisecond) stats.Record(r.ctx, r.on.LatencyMs().M(ms)) - if r.span != nil { - r.span.End() - } } // Error records the result as an error. diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go similarity index 64% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go index c2cbadde0d..7483e58958 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/doc.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/doc.go @@ -8,5 +8,16 @@ package, they should use the client package. This package is for infrastructure developers implementing new transports, or intermediary components like importers, channels or brokers. 
+Available protocols: + +* HTTP (using net/http) +* Kafka (using github.com/Shopify/sarama) +* AMQP (using pack.ag/amqp) +* Go Channels +* Nats +* Nats Streaming (stan) +* Google PubSub + */ -package transport + +package protocol diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/error.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go similarity index 98% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/error.go rename to vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go index bb4e8ec9f8..894cbbf3b1 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/transport/error.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/error.go @@ -1,4 +1,4 @@ -package transport +package protocol import "fmt" diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go new file mode 100644 index 0000000000..0cc9391827 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/doc.go @@ -0,0 +1,5 @@ +package http + +/* +Module http implements an HTTP binding using net/http module +*/ diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go new file mode 100644 index 0000000000..ecf57a64ad --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/message.go @@ -0,0 +1,130 @@ +package http + +import ( + "context" + "io" + nethttp "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" +) + +const prefix = "Ce-" + +var specs = spec.WithPrefix(prefix) + +const ContentType = "Content-Type" +const ContentLength = "Content-Length" + +// Message holds the Header and Body of a HTTP Request or Response. +// The Message instance *must* be constructed from NewMessage function. +// This message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +type Message struct { + Header nethttp.Header + BodyReader io.ReadCloser + OnFinish func(error) error + + format format.Format + version spec.Version +} + +// Check if http.Message implements binding.Message +var _ binding.Message = (*Message)(nil) + +// NewMessage returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessage(header nethttp.Header, body io.ReadCloser) *Message { + m := Message{Header: header} + if body != nil { + m.BodyReader = body + } + if m.format = format.Lookup(header.Get(ContentType)); m.format == nil { + m.version = specs.Version(m.Header.Get(specs.PrefixedSpecVersionName())) + } + return &m +} + +// NewMessageFromHttpRequest returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpRequest(req *nethttp.Request) *Message { + if req == nil { + return nil + } + return NewMessage(req.Header, req.Body) +} + +// NewMessageFromHttpResponse returns a binding.Message with header and data. +// The returned binding.Message *cannot* be read several times. 
In order to read it more times, buffer it using binding/buffering methods +func NewMessageFromHttpResponse(resp *nethttp.Response) *Message { + if resp == nil { + return nil + } + msg := NewMessage(resp.Header, resp.Body) + return msg +} + +func (m *Message) ReadEncoding() binding.Encoding { + if m.version != nil { + return binding.EncodingBinary + } + if m.format != nil { + return binding.EncodingStructured + } + return binding.EncodingUnknown +} + +func (m *Message) ReadStructured(ctx context.Context, encoder binding.StructuredWriter) error { + if m.format == nil { + return binding.ErrNotStructured + } else { + return encoder.SetStructuredEvent(ctx, m.format, m.BodyReader) + } +} + +func (m *Message) ReadBinary(ctx context.Context, encoder binding.BinaryWriter) error { + if m.version == nil { + return binding.ErrNotBinary + } + + err := encoder.Start(ctx) + if err != nil { + return err + } + + for k, v := range m.Header { + if strings.HasPrefix(k, prefix) { + attr := m.version.Attribute(k) + if attr != nil { + err = encoder.SetAttribute(attr, v[0]) + } else { + err = encoder.SetExtension(strings.ToLower(strings.TrimPrefix(k, prefix)), v[0]) + } + } else if k == ContentType { + err = encoder.SetAttribute(m.version.AttributeFromKind(spec.DataContentType), v[0]) + } + if err != nil { + return err + } + } + + if m.BodyReader != nil { + err = encoder.SetData(m.BodyReader) + if err != nil { + return err + } + } + + return encoder.End(ctx) +} + +func (m *Message) Finish(err error) error { + if m.BodyReader != nil { + _ = m.BodyReader.Close() + } + if m.OnFinish != nil { + return m.OnFinish(err) + } + return nil +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go new file mode 100644 index 0000000000..5ced0748b3 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/options.go @@ -0,0 +1,186 @@ +package http + +import ( + "fmt" + "net" + nethttp "net/http" + "net/url" + "strings" + "time" +) + +// Option is the function signature required to be considered an http.Option. +type Option func(*Protocol) error + +// WithTarget sets the outbound recipient of cloudevents when using an HTTP +// request. +func WithTarget(targetUrl string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http target option can not set nil protocol") + } + targetUrl = strings.TrimSpace(targetUrl) + if targetUrl != "" { + var err error + var target *url.URL + target, err = url.Parse(targetUrl) + if err != nil { + return fmt.Errorf("http target option failed to parse target url: %s", err.Error()) + } + + p.Target = target + + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + p.RequestTemplate.URL = target + + return nil + } + return fmt.Errorf("http target option was empty string") + } +} + +// WithHeader sets an additional default outbound header for all cloudevents +// when using an HTTP request. 
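Since `Message` is the bridge between `net/http` and the binding layer, here is a rough sketch of how an incoming request might be classified with `NewMessageFromHttpRequest` and `ReadEncoding`. The handler itself and its responses are assumptions for the sketch, not vendored code.

```go
package main

import (
	nethttp "net/http"

	"github.com/cloudevents/sdk-go/v2/binding"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

// handleCE is a hypothetical handler showing how an incoming request is
// wrapped and classified by encoding.
func handleCE(rw nethttp.ResponseWriter, req *nethttp.Request) {
	m := cehttp.NewMessageFromHttpRequest(req)
	defer func() { _ = m.Finish(nil) }() // closes the request body

	switch m.ReadEncoding() {
	case binding.EncodingStructured:
		// Content-Type matched a registered CloudEvents format (e.g. JSON).
		rw.WriteHeader(nethttp.StatusOK)
	case binding.EncodingBinary:
		// A Ce-Specversion header was found; attributes live in Ce-* headers.
		rw.WriteHeader(nethttp.StatusOK)
	default:
		nethttp.Error(rw, "not a CloudEvent", nethttp.StatusBadRequest)
	}
}

func main() {
	_ = nethttp.ListenAndServe(":8080", nethttp.HandlerFunc(handleCE))
}
```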
+func WithHeader(key, value string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http header option can not set nil protocol") + } + key = strings.TrimSpace(key) + if key != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{ + Method: nethttp.MethodPost, + } + } + if p.RequestTemplate.Header == nil { + p.RequestTemplate.Header = nethttp.Header{} + } + p.RequestTemplate.Header.Add(key, value) + return nil + } + return fmt.Errorf("http header option was empty string") + } +} + +// WithShutdownTimeout sets the shutdown timeout when the http server is being shutdown. +func WithShutdownTimeout(timeout time.Duration) Option { + return func(t *Protocol) error { + if t == nil { + return fmt.Errorf("http shutdown timeout option can not set nil protocol") + } + t.ShutdownTimeout = &timeout + return nil + } +} + +func checkListen(t *Protocol, prefix string) error { + switch { + case t.Port != nil: + return fmt.Errorf("%v port already set", prefix) + case t.listener != nil: + return fmt.Errorf("%v listener already set", prefix) + } + return nil +} + +// WithPort sets the listening port for StartReceiver. +// Only one of WithListener or WithPort is allowed. +func WithPort(port int) Option { + return func(t *Protocol) error { + if t == nil { + return fmt.Errorf("http port option can not set nil protocol") + } + if port < 0 || port > 65535 { + return fmt.Errorf("http port option was given an invalid port: %d", port) + } + if err := checkListen(t, "http port option"); err != nil { + return err + } + t.setPort(port) + return nil + } +} + +// WithListener sets the listener for StartReceiver. +// Only one of WithListener or WithPort is allowed. +func WithListener(l net.Listener) Option { + return func(t *Protocol) error { + if t == nil { + return fmt.Errorf("http listener option can not set nil protocol") + } + if err := checkListen(t, "http port option"); err != nil { + return err + } + t.listener = l + _, err := t.listen() + return err + } +} + +// WithPath sets the path to receive cloudevents on for HTTP transports. +func WithPath(path string) Option { + return func(t *Protocol) error { + if t == nil { + return fmt.Errorf("http path option can not set nil protocol") + } + path = strings.TrimSpace(path) + if len(path) == 0 { + return fmt.Errorf("http path option was given an invalid path: %q", path) + } + t.Path = path + return nil + } +} + +// WithMethod sets the HTTP verb (GET, POST, PUT, etc.) to use +// when using an HTTP request. +func WithMethod(method string) Option { + return func(p *Protocol) error { + if p == nil { + return fmt.Errorf("http method option can not set nil protocol") + } + method = strings.TrimSpace(method) + if method != "" { + if p.RequestTemplate == nil { + p.RequestTemplate = &nethttp.Request{} + } + p.RequestTemplate.Method = method + return nil + } + return fmt.Errorf("http method option was empty string") + } +} + +// +// Middleware is a function that takes an existing http.Handler and wraps it in middleware, +// returning the wrapped http.Handler. +type Middleware func(next nethttp.Handler) nethttp.Handler + +// WithMiddleware adds an HTTP middleware to the transport. It may be specified multiple times. +// Middleware is applied to everything before it. For example +// `NewClient(WithMiddleware(foo), WithMiddleware(bar))` would result in `bar(foo(original))`. 
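These are ordinary functional options consumed by `New` (added a little further down in this diff). A small illustrative composition follows; the URL, header, port, and path values are arbitrary placeholders.

```go
package main

import (
	"log"
	"time"

	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

func main() {
	// Outbound side: every Send/Request POSTs to the fixed target and carries
	// an extra default header. The URL and header values are placeholders.
	sender, err := cehttp.New(
		cehttp.WithTarget("http://broker.example.com/"),
		cehttp.WithHeader("X-Demo", "true"),
		cehttp.WithMethod("POST"),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = sender

	// Inbound side: the receiver listens on :9090 under /events.
	// WithPort and WithListener are mutually exclusive.
	receiver, err := cehttp.New(
		cehttp.WithPort(9090),
		cehttp.WithPath("/events"),
		cehttp.WithShutdownTimeout(30*time.Second),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = receiver
}
```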
+func WithMiddleware(middleware Middleware) Option { + return func(t *Protocol) error { + if t == nil { + return fmt.Errorf("http middleware option can not set nil protocol") + } + t.middleware = append(t.middleware, middleware) + return nil + } +} + +// WithRoundTripper sets the HTTP RoundTripper. +func WithRoundTripper(roundTripper nethttp.RoundTripper) Option { + return func(t *Protocol) error { + if t == nil { + return fmt.Errorf("http round tripper option can not set nil protocol") + } + t.roundTripper = roundTripper + return nil + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go new file mode 100644 index 0000000000..0cb1b535eb --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol.go @@ -0,0 +1,269 @@ +package http + +import ( + "context" + "fmt" + "io" + "net" + "net/http" + "net/url" + "sync" + "time" + + "github.com/cloudevents/sdk-go/v2/protocol" + + "github.com/cloudevents/sdk-go/v2/binding" + cecontext "github.com/cloudevents/sdk-go/v2/context" +) + +const ( + // DefaultShutdownTimeout defines the default timeout given to the http.Server when calling Shutdown. + DefaultShutdownTimeout = time.Minute * 1 +) + +// Protocol acts as both a http client and a http handler. +type Protocol struct { + Target *url.URL + RequestTemplate *http.Request + transformers binding.TransformerFactories + Client *http.Client + incoming chan msgErr + + // To support Opener: + + // ShutdownTimeout defines the timeout given to the http.Server when calling Shutdown. + // If nil, DefaultShutdownTimeout is used. + ShutdownTimeout *time.Duration + + // Port is the port to bind the receiver to. Defaults to 8080. + Port *int + // Path is the path to bind the receiver to. Defaults to "/". + Path string + + // Receive Mutex + reMu sync.Mutex + // Handler is the handler the http Server will use. Use this to reuse the + // http server. If nil, the Protocol will create a one. 
+ Handler *http.ServeMux + listener net.Listener + roundTripper http.RoundTripper + server *http.Server + handlerRegistered bool + middleware []Middleware +} + +func New(opts ...Option) (*Protocol, error) { + p := &Protocol{ + transformers: make(binding.TransformerFactories, 0), + incoming: make(chan msgErr), + } + if err := p.applyOptions(opts...); err != nil { + return nil, err + } + + if p.Client == nil { + p.Client = http.DefaultClient + } + + if p.roundTripper != nil { + p.Client.Transport = p.roundTripper + } + + if p.ShutdownTimeout == nil { + timeout := DefaultShutdownTimeout + p.ShutdownTimeout = &timeout + } + + return p, nil +} + +func (p *Protocol) applyOptions(opts ...Option) error { + for _, fn := range opts { + if err := fn(p); err != nil { + return err + } + } + return nil +} + +// Send implements binding.Sender +func (p *Protocol) Send(ctx context.Context, m binding.Message) error { + if ctx == nil { + return fmt.Errorf("nil Context") + } else if m == nil { + return fmt.Errorf("nil Message") + } + + _, err := p.Request(ctx, m) + return err +} + +// Request implements binding.Requester +func (p *Protocol) Request(ctx context.Context, m binding.Message) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } else if m == nil { + return nil, fmt.Errorf("nil Message") + } + + var err error + defer func() { _ = m.Finish(err) }() + + req := p.makeRequest(ctx) + + if p.Client == nil || req == nil || req.URL == nil { + return nil, fmt.Errorf("not initialized: %#v", p) + } + + if err = WriteRequest(ctx, m, req, p.transformers); err != nil { + return nil, err + } + resp, err := p.Client.Do(req) + if err != nil { + return nil, protocol.NewReceipt(false, "%w", err) + } + + var result protocol.Result + if resp.StatusCode/100 == 2 { + result = protocol.ResultACK + } else { + result = protocol.ResultNACK + } + + return NewMessage(resp.Header, resp.Body), NewResult(resp.StatusCode, "%w", result) +} + +func (p *Protocol) makeRequest(ctx context.Context) *http.Request { + // TODO: support custom headers from context? + req := &http.Request{ + Method: http.MethodPost, + Header: make(http.Header), + // TODO: HeaderFrom(ctx), + } + + if p.RequestTemplate != nil { + req.Method = p.RequestTemplate.Method + req.URL = p.RequestTemplate.URL + req.Close = p.RequestTemplate.Close + req.Host = p.RequestTemplate.Host + copyHeadersEnsure(p.RequestTemplate.Header, &req.Header) + } + + if p.Target != nil { + req.URL = p.Target + } + + // Override the default request with target from context. + if target := cecontext.TargetFrom(ctx); target != nil { + req.URL = target + } + return req.WithContext(ctx) +} + +// Ensure to is a non-nil map before copying +func copyHeadersEnsure(from http.Header, to *http.Header) { + if len(from) > 0 { + if *to == nil { + *to = http.Header{} + } + copyHeaders(from, *to) + } +} + +func copyHeaders(from, to http.Header) { + if from == nil || to == nil { + return + } + for header, values := range from { + for _, value := range values { + to.Add(header, value) + } + } +} + +// Receive the next incoming HTTP request as a CloudEvent. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Receive(ctx context.Context) (binding.Message, error) { + if ctx == nil { + return nil, fmt.Errorf("nil Context") + } + + msg, fn, err := p.Respond(ctx) + // No-op the response. 
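`Request` hands back the HTTP response as a `binding.Message` plus a `protocol.Result` carrying the status code; the `Result`, `IsACK`, and `ResultAs` helpers it relies on appear later in this diff (`protocol/http/result.go` and `protocol/result.go`). A hedged sketch of a caller, assuming `m` is an already-constructed `binding.Message`:

```go
package main

import (
	"context"
	"fmt"

	"github.com/cloudevents/sdk-go/v2/binding"
	"github.com/cloudevents/sdk-go/v2/protocol"
	cehttp "github.com/cloudevents/sdk-go/v2/protocol/http"
)

// requestAndCheck is a hypothetical helper; m is assumed to already exist.
func requestAndCheck(ctx context.Context, p *cehttp.Protocol, m binding.Message) error {
	resp, result := p.Request(ctx, m)
	if resp != nil {
		defer func() { _ = resp.Finish(nil) }() // release the response body
	}

	// A 2xx response wraps protocol.ResultACK, so IsACK reports success.
	if protocol.IsACK(result) {
		return nil
	}

	// Otherwise unwrap the HTTP-specific Result to get at the status code.
	var httpResult *cehttp.Result
	if protocol.ResultAs(result, &httpResult) {
		return fmt.Errorf("event rejected with HTTP status %d", httpResult.StatusCode)
	}
	return result
}

func main() {}
```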
+ defer func() { + if fn != nil { + _ = fn(ctx, nil, nil) + } + }() + return msg, err +} + +// Respond receives the next incoming HTTP request as a CloudEvent and waits +// for the response callback to invoked before continuing. +// Returns non-nil error if the incoming HTTP request fails to parse as a CloudEvent +// Returns io.EOF if the receiver is closed. +func (p *Protocol) Respond(ctx context.Context) (binding.Message, protocol.ResponseFn, error) { + if ctx == nil { + return nil, nil, fmt.Errorf("nil Context") + } + + select { + case in, ok := <-p.incoming: + if !ok { + return nil, nil, io.EOF + } + return in.msg, in.respFn, in.err + case <-ctx.Done(): + return nil, nil, io.EOF + } +} + +type msgErr struct { + msg *Message + respFn protocol.ResponseFn + err error +} + +// ServeHTTP implements http.Handler. +// Blocks until Message.Finish is called. +func (p *Protocol) ServeHTTP(rw http.ResponseWriter, req *http.Request) { + + m := NewMessageFromHttpRequest(req) + if m == nil || m.ReadEncoding() == binding.EncodingUnknown { + p.incoming <- msgErr{msg: nil, err: binding.ErrUnknownEncoding} + return // if there was no message, return. + } + + done := make(chan error) + + m.OnFinish = func(err error) error { + done <- err + return nil + } + + var fn protocol.ResponseFn = func(ctx context.Context, resp binding.Message, er protocol.Result) error { + + status := http.StatusOK + if er != nil { + var result *Result + if protocol.ResultAs(er, &result) { + if result.StatusCode > 100 && result.StatusCode < 600 { + status = result.StatusCode + } + } + } + if resp != nil { + err := WriteResponseWriter(ctx, resp, status, rw, p.transformers) + return resp.Finish(err) + } + rw.WriteHeader(status) + return nil + } + + p.incoming <- msgErr{msg: m, respFn: fn} // Send to Request + if err := <-done; err != nil { + fmt.Println("attempting to write an error out on response writer:", err) + http.Error(rw, fmt.Sprintf("cannot forward CloudEvent: %v", err), http.StatusInternalServerError) + } +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go new file mode 100644 index 0000000000..039ccfa02c --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/protocol_lifecycle.go @@ -0,0 +1,139 @@ +package http + +import ( + "context" + "fmt" + "net" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/protocol" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/plugin/ochttp/propagation/tracecontext" +) + +var _ protocol.Opener = (*Protocol)(nil) + +func (e *Protocol) OpenInbound(ctx context.Context) error { + e.reMu.Lock() + defer e.reMu.Unlock() + + if e.Handler == nil { + e.Handler = http.NewServeMux() + } + + if !e.handlerRegistered { + // handler.Handle might panic if the user tries to use the same path as the sdk. 
+ e.Handler.Handle(e.GetPath(), e) + e.handlerRegistered = true + } + + addr, err := e.listen() + if err != nil { + return err + } + + e.server = &http.Server{ + Addr: addr.String(), + Handler: &ochttp.Handler{ + Propagation: &tracecontext.HTTPFormat{}, + Handler: attachMiddleware(e.Handler, e.middleware), + FormatSpanName: formatSpanName, + }, + } + + // Shutdown + defer func() { + _ = e.server.Close() + e.server = nil + }() + + errChan := make(chan error, 1) + go func() { + errChan <- e.server.Serve(e.listener) + }() + + // nil check and default + shutdown := DefaultShutdownTimeout + if e.ShutdownTimeout != nil { + shutdown = *e.ShutdownTimeout + } + + // wait for the server to return or ctx.Done(). + select { + case <-ctx.Done(): + // Try a gracefully shutdown. + timeout := shutdown + ctx, cancel := context.WithTimeout(context.Background(), timeout) + defer cancel() + err := e.server.Shutdown(ctx) + <-errChan // Wait for server goroutine to exit + return err + case err := <-errChan: + return err + } +} + +// GetPort returns the listening port. +// Returns -1 if there is a listening error. +// Note this will call net.Listen() if the listener is not already started. +func (e *Protocol) GetPort() int { + // Ensure we have a listener and therefore a port. + if _, err := e.listen(); err == nil || e.Port != nil { + return *e.Port + } + return -1 +} + +func formatSpanName(r *http.Request) string { + return "cloudevents.http." + r.URL.Path +} + +func (e *Protocol) setPort(port int) { + if e.Port == nil { + e.Port = new(int) + } + *e.Port = port +} + +// listen if not already listening, update t.Port +func (e *Protocol) listen() (net.Addr, error) { + if e.listener == nil { + port := 8080 + if e.Port != nil { + port = *e.Port + if port < 0 || port > 65535 { + return nil, fmt.Errorf("invalid port %d", port) + } + } + var err error + if e.listener, err = net.Listen("tcp", fmt.Sprintf(":%d", port)); err != nil { + return nil, err + } + } + addr := e.listener.Addr() + if tcpAddr, ok := addr.(*net.TCPAddr); ok { + e.setPort(tcpAddr.Port) + } + return addr, nil +} + +// GetPath returns the path the transport is hosted on. If the path is '/', +// the transport will handle requests on any URI. To discover the true path +// a request was received on, inspect the context from Receive(cxt, ...) with +// TransportContextFrom(ctx). +func (e *Protocol) GetPath() string { + path := strings.TrimSpace(e.Path) + if len(path) > 0 { + return path + } + return "/" // default +} + +// attachMiddleware attaches the HTTP middleware to the specified handler. +func attachMiddleware(h http.Handler, middleware []Middleware) http.Handler { + for _, m := range middleware { + h = m(h) + } + return h +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go new file mode 100644 index 0000000000..eefdf0f4df --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/result.go @@ -0,0 +1,47 @@ +package http + +import ( + "errors" + "fmt" + + "github.com/cloudevents/sdk-go/v2/protocol" +) + +// NewResult returns a fully populated http Result that should be used as +// a transport.Result. +func NewResult(statusCode int, messageFmt string, args ...interface{}) protocol.Result { + return &Result{ + StatusCode: statusCode, + Format: messageFmt, + Args: args, + } +} + +// Result wraps the fields required to make adjustments for http Responses. 
+type Result struct { + StatusCode int + Format string + Args []interface{} +} + +// make sure Result implements error. +var _ error = (*Result)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Result) Is(target error) bool { + if o, ok := target.(*Result); ok { + if e.StatusCode == o.StatusCode { + return true + } + return false + } + // Allow for wrapped errors. + err := fmt.Errorf(e.Format, e.Args...) + return errors.Is(err, target) +} + +// Error returns the string that is formed by using the format string with the +// provided args. +func (e *Result) Error() string { + return fmt.Sprintf("%d: %s", e.StatusCode, fmt.Sprintf(e.Format, e.Args...)) +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go new file mode 100644 index 0000000000..14184a7b7a --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_request.go @@ -0,0 +1,131 @@ +package http + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "net/http" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Fill the provided httpRequest with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WriteRequest(ctx context.Context, m binding.Message, httpRequest *http.Request, transformers ...binding.TransformerFactory) error { + structuredWriter := (*httpRequestWriter)(httpRequest) + binaryWriter := (*httpRequestWriter)(httpRequest) + + _, err := binding.Write( + ctx, + m, + structuredWriter, + binaryWriter, + transformers..., + ) + return err +} + +type httpRequestWriter http.Request + +func (b *httpRequestWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.Header.Set(ContentType, format.MediaType()) + return b.setBody(event) +} + +func (b *httpRequestWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) End(ctx context.Context) error { + return nil +} + +func (b *httpRequestWriter) SetData(data io.Reader) error { + return b.setBody(data) +} + +// setBody is a cherry-pick of the implementation in http.NewRequestWithContext +func (b *httpRequestWriter) setBody(body io.Reader) error { + rc, ok := body.(io.ReadCloser) + if !ok && body != nil { + rc = ioutil.NopCloser(body) + } + b.Body = rc + if body != nil { + switch v := body.(type) { + case *bytes.Buffer: + b.ContentLength = int64(v.Len()) + buf := v.Bytes() + b.GetBody = func() (io.ReadCloser, error) { + r := bytes.NewReader(buf) + return ioutil.NopCloser(r), nil + } + case *bytes.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } + case *strings.Reader: + b.ContentLength = int64(v.Len()) + snapshot := *v + b.GetBody = func() (io.ReadCloser, error) { + r := snapshot + return ioutil.NopCloser(&r), nil + } + default: + // This is where we'd set it to -1 (at least + // if body != NoBody) to mean unknown, but + // that broke people during the Go 1.8 testing + // period. People depend on it being 0 I + // guess. Maybe retry later. See Issue 18117. + } + // For client requests, Request.ContentLength of 0 + // means either actually 0, or unknown. 
The only way + // to explicitly say that the ContentLength is zero is + // to set the Body to nil. But turns out too much code + // depends on NewRequest returning a non-nil Body, + // so we use a well-known ReadCloser variable instead + // and have the http package also treat that sentinel + // variable to mean explicitly zero. + if b.GetBody != nil && b.ContentLength == 0 { + b.Body = http.NoBody + b.GetBody = func() (io.ReadCloser, error) { return http.NoBody, nil } + } + } + return nil +} + +func (b *httpRequestWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + + if attribute.Kind() == spec.DataContentType { + b.Header.Add(ContentType, s) + } else { + b.Header.Add(prefix+attribute.Name(), s) + } + return nil +} + +func (b *httpRequestWriter) SetExtension(name string, value interface{}) error { + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + b.Header.Add(prefix+name, s) + return nil +} + +var _ binding.StructuredWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpRequestWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go new file mode 100644 index 0000000000..1baa69fba1 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/http/write_responsewriter.go @@ -0,0 +1,118 @@ +package http + +import ( + "bytes" + "context" + "io" + "net/http" + "strconv" + "strings" + + "github.com/cloudevents/sdk-go/v2/binding" + "github.com/cloudevents/sdk-go/v2/binding/format" + "github.com/cloudevents/sdk-go/v2/binding/spec" + "github.com/cloudevents/sdk-go/v2/types" +) + +// Write out to the the provided httpResponseWriter with the message m. +// Using context you can tweak the encoding processing (more details on binding.Write documentation). +func WriteResponseWriter(ctx context.Context, m binding.Message, status int, rw http.ResponseWriter, transformers ...binding.TransformerFactory) error { + if status < 200 || status >= 600 { + status = http.StatusOK + } + writer := &httpResponseWriter{rw: rw, status: status} + + _, err := binding.Write( + ctx, + m, + writer, + writer, + transformers..., + ) + return err +} + +type httpResponseWriter struct { + rw http.ResponseWriter + status int + body io.Reader +} + +func (b *httpResponseWriter) SetStructuredEvent(ctx context.Context, format format.Format, event io.Reader) error { + b.rw.Header().Set(ContentType, format.MediaType()) + b.body = event + return b.finalizeWriter() +} + +func (b *httpResponseWriter) Start(ctx context.Context) error { + return nil +} + +func (b *httpResponseWriter) SetAttribute(attribute spec.Attribute, value interface{}) error { + // Http headers, everything is a string! + s, err := types.Format(value) + if err != nil { + return err + } + + if attribute.Kind() == spec.DataContentType { + b.rw.Header().Add(ContentType, s) + } else { + b.rw.Header().Add(prefix+attribute.Name(), s) + } + return nil +} + +func (b *httpResponseWriter) SetExtension(name string, value interface{}) error { + // Http headers, everything is a string! 
+ s, err := types.Format(value) + if err != nil { + return err + } + b.rw.Header().Add(prefix+name, s) + return nil +} + +func (b *httpResponseWriter) SetData(reader io.Reader) error { + b.body = reader + return nil +} + +func (b *httpResponseWriter) finalizeWriter() error { + if b.body != nil { + // Try to figure it out if we have a content-length + contentLength := -1 + switch v := b.body.(type) { + case *bytes.Buffer: + contentLength = v.Len() + case *bytes.Reader: + contentLength = v.Len() + case *strings.Reader: + contentLength = v.Len() + } + + if contentLength != -1 { + b.rw.Header().Add("Content-length", strconv.Itoa(contentLength)) + } + + // Finalize the headers. + b.rw.WriteHeader(b.status) + + // Write body. + _, err := io.Copy(b.rw, b.body) + if err != nil { + return err + } + } else { + // Finalize the headers. + b.rw.WriteHeader(b.status) + } + return nil +} + +func (b *httpResponseWriter) End(ctx context.Context) error { + return b.finalizeWriter() +} + +var _ binding.StructuredWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface +var _ binding.BinaryWriter = (*httpResponseWriter)(nil) // Test it conforms to the interface diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go new file mode 100644 index 0000000000..9fb0a71c1e --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/inbound.go @@ -0,0 +1,41 @@ +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Receiver receives messages. +type Receiver interface { + // Receive blocks till a message is received or ctx expires. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + Receive(ctx context.Context) (binding.Message, error) +} + +// ReceiveCloser is a Receiver that can be closed. +type ReceiveCloser interface { + Receiver + Closer +} + +// ResponseFn is the function callback provided from Responder.Respond to allow +// for a receiver to "reply" to a message it receives. +type ResponseFn func(ctx context.Context, m binding.Message, r Result) error + +// Responder receives messages and is given a callback to respond. +type Responder interface { + // Receive blocks till a message is received or ctx expires. + // + // A non-nil error means the receiver is closed. + // io.EOF means it closed cleanly, any other value indicates an error. + Respond(ctx context.Context) (binding.Message, ResponseFn, error) +} + +// ResponderCloser is a Responder that can be closed. +type ResponderCloser interface { + Responder + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go new file mode 100644 index 0000000000..bb4de5e897 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/lifecycle.go @@ -0,0 +1,16 @@ +package protocol + +import ( + "context" +) + +// Opener is the common interface for things that need to be opened. +type Opener interface { + // Blocking call. Context is used to cancel. + OpenInbound(ctx context.Context) error +} + +// Closer is the common interface for things that can be closed. 
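These inbound interfaces are what a client loop drives: `OpenInbound` blocks while serving, and `Receive` yields messages until `io.EOF`. Below is a rough, illustrative sketch of such a loop; the composite interface and the error handling are assumptions for the example, not sdk code.

```go
package main

import (
	"context"
	"errors"
	"io"
	"log"

	"github.com/cloudevents/sdk-go/v2/protocol"
)

// serve drives anything that is both an Opener and a Receiver, such as the
// HTTP Protocol shown earlier in this diff.
func serve(ctx context.Context, p interface {
	protocol.Opener
	protocol.Receiver
}) error {
	// OpenInbound blocks until ctx is cancelled, so run it in the background.
	go func() {
		if err := p.OpenInbound(ctx); err != nil {
			log.Printf("inbound closed: %v", err)
		}
	}()

	for {
		msg, err := p.Receive(ctx)
		if errors.Is(err, io.EOF) {
			return nil // receiver closed cleanly
		}
		if err != nil {
			return err
		}
		// Hand msg to application code here, then release it.
		_ = msg.Finish(nil)
	}
}

func main() {}
```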
+type Closer interface { + Close(ctx context.Context) error +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go new file mode 100644 index 0000000000..ff5f3973a6 --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/outbound.go @@ -0,0 +1,42 @@ +package protocol + +import ( + "context" + + "github.com/cloudevents/sdk-go/v2/binding" +) + +// Sender sends messages. +type Sender interface { + // Send a message. + // + // Send returns when the "outbound" message has been sent. The Sender may + // still be expecting acknowledgment or holding other state for the message. + // + // m.Finish() is called when sending is finished (both succeeded or failed): + // expected acknowledgments (or errors) have been received, the Sender is + // no longer holding any state for the message. + // m.Finish() may be called during or after Send(). + Send(ctx context.Context, m binding.Message) error +} + +// SendCloser is a Sender that can be closed. +type SendCloser interface { + Sender + Closer +} + +// Requester sends a message and receives a response +// +// Optional interface that may be implemented by protocols that support +// request/response correlation. +type Requester interface { + // Request sends m like Sender.Send() but also arranges to receive a response. + Request(ctx context.Context, m binding.Message) (binding.Message, error) +} + +// RequesterCloser is a Requester that can be closed. +type RequesterCloser interface { + Requester + Closer +} diff --git a/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go new file mode 100644 index 0000000000..8532800bfe --- /dev/null +++ b/vendor/github.com/cloudevents/sdk-go/v2/protocol/result.go @@ -0,0 +1,96 @@ +package protocol + +import ( + "errors" + "fmt" +) + +// Result leverages go's 1.13 error wrapping. +type Result error + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +// (text from errors/wrap.go) +var ResultIs = errors.Is + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +// (text from errors/wrap.go) +var ResultAs = errors.As + +func NewResult(messageFmt string, args ...interface{}) Result { + return fmt.Errorf(messageFmt, args...) // TODO: look at adding ACK/Nak support. +} + +func IsACK(target Result) bool { + // special case, nil target also means ACK. 
+ if target == nil { + return true + } + + return ResultIs(target, ResultACK) +} + +func IsNACK(target Result) bool { + return ResultIs(target, ResultNACK) +} + +var ( + ResultACK = NewReceipt(true, "") + ResultNACK = NewReceipt(false, "") +) + +// NewReceipt returns a fully populated protocol Receipt that should be used as +// a transport.Result. This type holds the base ACK/NACK results. +func NewReceipt(ack bool, messageFmt string, args ...interface{}) Result { + return &Receipt{ + ACK: ack, + Format: messageFmt, + Args: args, + } +} + +// Receipt wraps the fields required to understand if a protocol event is acknowledged. +type Receipt struct { + ACK bool + Format string + Args []interface{} +} + +// make sure Result implements error. +var _ error = (*Receipt)(nil) + +// Is returns if the target error is a Result type checking target. +func (e *Receipt) Is(target error) bool { + if o, ok := target.(*Receipt); ok { + if e.ACK == o.ACK { + return true + } + return false + } + // Allow for wrapped errors. + err := fmt.Errorf(e.Format, e.Args...) + return errors.Is(err, target) +} + +// Error returns the string that is formed by using the format string with the +// provided args. +func (e *Receipt) Error() string { + return fmt.Sprintf(e.Format, e.Args...) +} diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/allocate.go b/vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/allocate.go rename to vendor/github.com/cloudevents/sdk-go/v2/types/allocate.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/doc.go b/vendor/github.com/cloudevents/sdk-go/v2/types/doc.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/doc.go rename to vendor/github.com/cloudevents/sdk-go/v2/types/doc.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/timestamp.go b/vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/timestamp.go rename to vendor/github.com/cloudevents/sdk-go/v2/types/timestamp.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/uri.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uri.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/uri.go rename to vendor/github.com/cloudevents/sdk-go/v2/types/uri.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/uriref.go b/vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go similarity index 100% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/uriref.go rename to vendor/github.com/cloudevents/sdk-go/v2/types/uriref.go diff --git a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/value.go b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go similarity index 80% rename from vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/value.go rename to vendor/github.com/cloudevents/sdk-go/v2/types/value.go index 803be2f11f..c4bea9393d 100644 --- a/vendor/github.com/cloudevents/sdk-go/pkg/cloudevents/types/value.go +++ b/vendor/github.com/cloudevents/sdk-go/v2/types/value.go @@ -68,23 +68,23 @@ func Format(v interface{}) 
(string, error) { return v, nil case []byte: return FormatBinary(v), nil - case url.URL: + case URI: return v.String(), nil - case *url.URL: + case URIRef: // url.URL is often passed by pointer so allow both return v.String(), nil - case time.Time: - return FormatTime(v), nil + case Timestamp: + return FormatTime(v.Time), nil default: return "", fmt.Errorf("%T is not a CloudEvents type", v) } } // Validate v is a valid CloudEvents attribute value, convert it to one of: -// bool, int32, string, []byte, *url.URL, time.Time +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp func Validate(v interface{}) (interface{}, error) { switch v := v.(type) { - case bool, int32, string, []byte, time.Time: + case bool, int32, string, []byte: return v, nil // Already a CloudEvents type, no validation needed. case uint, uintptr, uint8, uint16, uint32, uint64: @@ -110,18 +110,32 @@ func Validate(v interface{}) (interface{}, error) { if v == nil { break } - return v, nil + return URI{URL: *v}, nil case url.URL: - return &v, nil + return URI{URL: v}, nil + case *URIRef: + if v != nil { + return *v, nil + } + return nil, nil case URIRef: - return &v.URL, nil + return v, nil + case *URI: + if v != nil { + return *v, nil + } + return nil, nil case URI: - return &v.URL, nil - case URLRef: - return &v.URL, nil - + return v, nil + case time.Time: + return Timestamp{Time: v}, nil + case *time.Time: + if v == nil { + break + } + return Timestamp{Time: *v}, nil case Timestamp: - return v.Time, nil + return v, nil } rx := reflect.ValueOf(v) if rx.Kind() == reflect.Ptr && !rx.IsNil() { @@ -131,6 +145,45 @@ func Validate(v interface{}) (interface{}, error) { return nil, fmt.Errorf("invalid CloudEvents value: %#v", v) } +// Clone v clones a CloudEvents attribute value, which is one of the valid types: +// bool, int32, string, []byte, types.URI, types.URIRef, types.Timestamp +// Returns the same type +// Panics if the type is not valid +func Clone(v interface{}) interface{} { + if v == nil { + return nil + } + switch v := v.(type) { + case bool, int32, string, nil: + return v // Already a CloudEvents type, no validation needed. + case []byte: + clone := make([]byte, len(v)) + copy(clone, v) + return v + case url.URL: + return URI{v} + case *url.URL: + return &URI{*v} + case URIRef: + return v + case *URIRef: + return &URIRef{v.URL} + case URI: + return v + case *URI: + return &URI{v.URL} + case time.Time: + return Timestamp{v} + case *time.Time: + return &Timestamp{*v} + case Timestamp: + return v + case *Timestamp: + return &Timestamp{v.Time} + } + panic(fmt.Errorf("invalid CloudEvents value: %#v", v)) +} + // ToBool accepts a bool value or canonical "true"/"false" string. 
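The upshot of the `value.go` changes above: `Validate` now normalizes attribute values to the wrapper types `types.URI`, `types.URIRef`, and `types.Timestamp` rather than `*url.URL` and `time.Time`, and `Format` accepts those wrappers. A small illustrative check of that behavior:

```go
package main

import (
	"fmt"
	"net/url"
	"time"

	"github.com/cloudevents/sdk-go/v2/types"
)

func main() {
	// A time.Time is normalized to types.Timestamp...
	v, err := types.Validate(time.Date(2020, 3, 1, 12, 0, 0, 0, time.UTC))
	fmt.Printf("%T %v\n", v, err) // types.Timestamp <nil>

	// ...and a url.URL to types.URI.
	u, _ := url.Parse("http://example.com/source")
	v, err = types.Validate(*u)
	fmt.Printf("%T %v\n", v, err) // types.URI <nil>

	// Format renders the wrapper types as strings (RFC3339 for timestamps).
	s, err := types.Format(types.Timestamp{Time: time.Date(2020, 3, 1, 12, 0, 0, 0, time.UTC)})
	fmt.Println(s, err) // 2020-03-01T12:00:00Z <nil>
}
```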
func ToBool(v interface{}) (bool, error) { v, err := Validate(v) @@ -203,8 +256,14 @@ func ToURL(v interface{}) (*url.URL, error) { return nil, err } switch v := v.(type) { - case *url.URL: - return v, nil + case *URI: + return &v.URL, nil + case URI: + return &v.URL, nil + case *URIRef: + return &v.URL, nil + case URIRef: + return &v.URL, nil case string: u, err := url.Parse(v) if err != nil { @@ -223,8 +282,8 @@ func ToTime(v interface{}) (time.Time, error) { return time.Time{}, err } switch v := v.(type) { - case time.Time: - return v, nil + case Timestamp: + return v.Time, nil case string: ts, err := time.Parse(time.RFC3339Nano, v) if err != nil { diff --git a/vendor/github.com/hashicorp/golang-lru/LICENSE b/vendor/github.com/hashicorp/golang-lru/LICENSE new file mode 100644 index 0000000000..be2cc4dfb6 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. "Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. 
"You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. 
You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. 
However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. 
Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go new file mode 100644 index 0000000000..a86c8539e0 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru.go @@ -0,0 +1,177 @@ +package simplelru + +import ( + "container/list" + "errors" +) + +// EvictCallback is used to get a callback when a cache entry is evicted +type EvictCallback func(key interface{}, value interface{}) + +// LRU implements a non-thread safe fixed size LRU cache +type LRU struct { + size int + evictList *list.List + items map[interface{}]*list.Element + onEvict EvictCallback +} + +// entry is used to hold a value in the evictList +type entry struct { + key interface{} + value interface{} +} + +// NewLRU constructs an LRU of the given size +func NewLRU(size int, onEvict EvictCallback) (*LRU, error) { + if size <= 0 { + return nil, errors.New("Must provide a positive size") + } + c := &LRU{ + size: size, + evictList: list.New(), + items: make(map[interface{}]*list.Element), + onEvict: onEvict, + } + return c, nil +} + +// Purge is used to completely clear the cache. +func (c *LRU) Purge() { + for k, v := range c.items { + if c.onEvict != nil { + c.onEvict(k, v.Value.(*entry).value) + } + delete(c.items, k) + } + c.evictList.Init() +} + +// Add adds a value to the cache. Returns true if an eviction occurred. +func (c *LRU) Add(key, value interface{}) (evicted bool) { + // Check for existing item + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + ent.Value.(*entry).value = value + return false + } + + // Add new item + ent := &entry{key, value} + entry := c.evictList.PushFront(ent) + c.items[key] = entry + + evict := c.evictList.Len() > c.size + // Verify size not exceeded + if evict { + c.removeOldest() + } + return evict +} + +// Get looks up a key's value from the cache. +func (c *LRU) Get(key interface{}) (value interface{}, ok bool) { + if ent, ok := c.items[key]; ok { + c.evictList.MoveToFront(ent) + if ent.Value.(*entry) == nil { + return nil, false + } + return ent.Value.(*entry).value, true + } + return +} + +// Contains checks if a key is in the cache, without updating the recent-ness +// or deleting it for being stale. +func (c *LRU) Contains(key interface{}) (ok bool) { + _, ok = c.items[key] + return ok +} + +// Peek returns the key value (or undefined if not found) without updating +// the "recently used"-ness of the key. +func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) { + var ent *list.Element + if ent, ok = c.items[key]; ok { + return ent.Value.(*entry).value, true + } + return nil, ok +} + +// Remove removes the provided key from the cache, returning if the +// key was contained. +func (c *LRU) Remove(key interface{}) (present bool) { + if ent, ok := c.items[key]; ok { + c.removeElement(ent) + return true + } + return false +} + +// RemoveOldest removes the oldest item from the cache. +func (c *LRU) RemoveOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// GetOldest returns the oldest entry +func (c *LRU) GetOldest() (key interface{}, value interface{}, ok bool) { + ent := c.evictList.Back() + if ent != nil { + kv := ent.Value.(*entry) + return kv.key, kv.value, true + } + return nil, nil, false +} + +// Keys returns a slice of the keys in the cache, from oldest to newest. 
+func (c *LRU) Keys() []interface{} { + keys := make([]interface{}, len(c.items)) + i := 0 + for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() { + keys[i] = ent.Value.(*entry).key + i++ + } + return keys +} + +// Len returns the number of items in the cache. +func (c *LRU) Len() int { + return c.evictList.Len() +} + +// Resize changes the cache size. +func (c *LRU) Resize(size int) (evicted int) { + diff := c.Len() - size + if diff < 0 { + diff = 0 + } + for i := 0; i < diff; i++ { + c.removeOldest() + } + c.size = size + return diff +} + +// removeOldest removes the oldest item from the cache. +func (c *LRU) removeOldest() { + ent := c.evictList.Back() + if ent != nil { + c.removeElement(ent) + } +} + +// removeElement is used to remove a given list element from the cache +func (c *LRU) removeElement(e *list.Element) { + c.evictList.Remove(e) + kv := e.Value.(*entry) + delete(c.items, kv.key) + if c.onEvict != nil { + c.onEvict(kv.key, kv.value) + } +} diff --git a/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go new file mode 100644 index 0000000000..92d70934d6 --- /dev/null +++ b/vendor/github.com/hashicorp/golang-lru/simplelru/lru_interface.go @@ -0,0 +1,39 @@ +package simplelru + +// LRUCache is the interface for simple LRU cache. +type LRUCache interface { + // Adds a value to the cache, returns true if an eviction occurred and + // updates the "recently used"-ness of the key. + Add(key, value interface{}) bool + + // Returns key's value from the cache and + // updates the "recently used"-ness of the key. #value, isFound + Get(key interface{}) (value interface{}, ok bool) + + // Checks if a key exists in cache without updating the recent-ness. + Contains(key interface{}) (ok bool) + + // Returns key's value without updating the "recently used"-ness of the key. + Peek(key interface{}) (value interface{}, ok bool) + + // Removes a key from the cache. + Remove(key interface{}) bool + + // Removes the oldest entry from cache. + RemoveOldest() (interface{}, interface{}, bool) + + // Returns the oldest entry from the cache. #key, value, isFound + GetOldest() (interface{}, interface{}, bool) + + // Returns a slice of the keys in the cache, from oldest to newest. + Keys() []interface{} + + // Returns the number of items in the cache. + Len() int + + // Clears all cache entries. + Purge() + + // Resizes cache, returning number evicted + Resize(int) int +} diff --git a/vendor/github.com/knative/docs/LICENSE b/vendor/github.com/knative/docs/LICENSE new file mode 100644 index 0000000000..960909248b --- /dev/null +++ b/vendor/github.com/knative/docs/LICENSE @@ -0,0 +1,605 @@ +# Licenses + +## Documentation + +Creative Commons Attribution 4.0 International + +======================================================================= + +Creative Commons Corporation ("Creative Commons") is not a law firm and +does not provide legal services or legal advice. Distribution of +Creative Commons public licenses does not create a lawyer-client or +other relationship. Creative Commons makes its licenses and related +information available on an "as-is" basis. Creative Commons gives no +warranties regarding its licenses, any material licensed under their +terms and conditions, or any related information. Creative Commons +disclaims all liability for damages resulting from their use to the +fullest extent possible. 
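The simplelru package vendored above exposes a small, non-thread-safe, fixed-size LRU cache (NewLRU, Add, Get, Keys, and friends). As a minimal usage sketch (not part of the vendored tree, and using the canonical upstream import path rather than the vendor directory), a two-entry cache with an eviction callback looks like this:

```go
package main

import (
	"fmt"

	"github.com/hashicorp/golang-lru/simplelru"
)

func main() {
	// Log every eviction so the LRU behaviour is visible.
	onEvict := func(key, value interface{}) {
		fmt.Printf("evicted %v=%v\n", key, value)
	}

	cache, err := simplelru.NewLRU(2, onEvict)
	if err != nil {
		panic(err)
	}

	cache.Add("a", 1)
	cache.Add("b", 2)
	cache.Add("c", 3) // exceeds size 2, so "a" (least recently used) is evicted

	if v, ok := cache.Get("b"); ok {
		fmt.Println("b =", v)
	}
	fmt.Println("keys, oldest to newest:", cache.Keys())
}
```

Note that this type provides no locking; callers needing concurrent access must synchronize themselves (the parent golang-lru package offers a mutex-protected wrapper, but only simplelru is vendored here).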
+ +Using Creative Commons Public Licenses + +Creative Commons public licenses provide a standard set of terms and +conditions that creators and other rights holders may use to share +original works of authorship and other material subject to copyright +and certain other rights specified in the public license below. The +following considerations are for informational purposes only, are not +exhaustive, and do not form part of our licenses. + + Considerations for licensors: Our public licenses are + intended for use by those authorized to give the public + permission to use material in ways otherwise restricted by + copyright and certain other rights. Our licenses are + irrevocable. Licensors should read and understand the terms + and conditions of the license they choose before applying it. + Licensors should also secure all rights necessary before + applying our licenses so that the public can reuse the + material as expected. Licensors should clearly mark any + material not subject to the license. This includes other CC- + licensed material, or material used under an exception or + limitation to copyright. More considerations for licensors: + wiki.creativecommons.org/Considerations_for_licensors + + Considerations for the public: By using one of our public + licenses, a licensor grants the public permission to use the + licensed material under specified terms and conditions. If + the licensor's permission is not necessary for any reason--for + example, because of any applicable exception or limitation to + copyright--then that use is not regulated by the license. Our + licenses grant only permissions under copyright and certain + other rights that a licensor has authority to grant. Use of + the licensed material may still be restricted for other + reasons, including because others have copyright or other + rights in the material. A licensor may make special requests, + such as asking that all changes be marked or described. + Although not required by our licenses, you are encouraged to + respect those requests where reasonable. More considerations + for the public: + wiki.creativecommons.org/Considerations_for_licensees + +======================================================================= + +Creative Commons Attribution 4.0 International Public License + +By exercising the Licensed Rights (defined below), You accept and agree +to be bound by the terms and conditions of this Creative Commons +Attribution 4.0 International Public License ("Public License"). To the +extent this Public License may be interpreted as a contract, You are +granted the Licensed Rights in consideration of Your acceptance of +these terms and conditions, and the Licensor grants You such rights in +consideration of benefits the Licensor receives from making the +Licensed Material available under these terms and conditions. + + +Section 1 -- Definitions. + + a. Adapted Material means material subject to Copyright and Similar + Rights that is derived from or based upon the Licensed Material + and in which the Licensed Material is translated, altered, + arranged, transformed, or otherwise modified in a manner requiring + permission under the Copyright and Similar Rights held by the + Licensor. For purposes of this Public License, where the Licensed + Material is a musical work, performance, or sound recording, + Adapted Material is always produced where the Licensed Material is + synched in timed relation with a moving image. + + b. 
Adapter's License means the license You apply to Your Copyright + and Similar Rights in Your contributions to Adapted Material in + accordance with the terms and conditions of this Public License. + + c. Copyright and Similar Rights means copyright and/or similar rights + closely related to copyright including, without limitation, + performance, broadcast, sound recording, and Sui Generis Database + Rights, without regard to how the rights are labeled or + categorized. For purposes of this Public License, the rights + specified in Section 2(b)(1)-(2) are not Copyright and Similar + Rights. + + d. Effective Technological Measures means those measures that, in the + absence of proper authority, may not be circumvented under laws + fulfilling obligations under Article 11 of the WIPO Copyright + Treaty adopted on December 20, 1996, and/or similar international + agreements. + + e. Exceptions and Limitations means fair use, fair dealing, and/or + any other exception or limitation to Copyright and Similar Rights + that applies to Your use of the Licensed Material. + + f. Licensed Material means the artistic or literary work, database, + or other material to which the Licensor applied this Public + License. + + g. Licensed Rights means the rights granted to You subject to the + terms and conditions of this Public License, which are limited to + all Copyright and Similar Rights that apply to Your use of the + Licensed Material and that the Licensor has authority to license. + + h. Licensor means the individual(s) or entity(ies) granting rights + under this Public License. + + i. Share means to provide material to the public by any means or + process that requires permission under the Licensed Rights, such + as reproduction, public display, public performance, distribution, + dissemination, communication, or importation, and to make material + available to the public including in ways that members of the + public may access the material from a place and at a time + individually chosen by them. + + j. Sui Generis Database Rights means rights other than copyright + resulting from Directive 96/9/EC of the European Parliament and of + the Council of 11 March 1996 on the legal protection of databases, + as amended and/or succeeded, as well as other essentially + equivalent rights anywhere in the world. + + k. You means the individual or entity exercising the Licensed Rights + under this Public License. Your has a corresponding meaning. + + +Section 2 -- Scope. + + a. License grant. + + 1. Subject to the terms and conditions of this Public License, + the Licensor hereby grants You a worldwide, royalty-free, + non-sublicensable, non-exclusive, irrevocable license to + exercise the Licensed Rights in the Licensed Material to: + + a. reproduce and Share the Licensed Material, in whole or + in part; and + + b. produce, reproduce, and Share Adapted Material. + + 2. Exceptions and Limitations. For the avoidance of doubt, where + Exceptions and Limitations apply to Your use, this Public + License does not apply, and You do not need to comply with + its terms and conditions. + + 3. Term. The term of this Public License is specified in Section + 6(a). + + 4. Media and formats; technical modifications allowed. The + Licensor authorizes You to exercise the Licensed Rights in + all media and formats whether now known or hereafter created, + and to make technical modifications necessary to do so. 
The + Licensor waives and/or agrees not to assert any right or + authority to forbid You from making technical modifications + necessary to exercise the Licensed Rights, including + technical modifications necessary to circumvent Effective + Technological Measures. For purposes of this Public License, + simply making modifications authorized by this Section 2(a) + (4) never produces Adapted Material. + + 5. Downstream recipients. + + a. Offer from the Licensor -- Licensed Material. Every + recipient of the Licensed Material automatically + receives an offer from the Licensor to exercise the + Licensed Rights under the terms and conditions of this + Public License. + + b. No downstream restrictions. You may not offer or impose + any additional or different terms or conditions on, or + apply any Effective Technological Measures to, the + Licensed Material if doing so restricts exercise of the + Licensed Rights by any recipient of the Licensed + Material. + + 6. No endorsement. Nothing in this Public License constitutes or + may be construed as permission to assert or imply that You + are, or that Your use of the Licensed Material is, connected + with, or sponsored, endorsed, or granted official status by, + the Licensor or others designated to receive attribution as + provided in Section 3(a)(1)(A)(i). + + b. Other rights. + + 1. Moral rights, such as the right of integrity, are not + licensed under this Public License, nor are publicity, + privacy, and/or other similar personality rights; however, to + the extent possible, the Licensor waives and/or agrees not to + assert any such rights held by the Licensor to the limited + extent necessary to allow You to exercise the Licensed + Rights, but not otherwise. + + 2. Patent and trademark rights are not licensed under this + Public License. + + 3. To the extent possible, the Licensor waives any right to + collect royalties from You for the exercise of the Licensed + Rights, whether directly or through a collecting society + under any voluntary or waivable statutory or compulsory + licensing scheme. In all other cases the Licensor expressly + reserves any right to collect such royalties. + + +Section 3 -- License Conditions. + +Your exercise of the Licensed Rights is expressly made subject to the +following conditions. + + a. Attribution. + + 1. If You Share the Licensed Material (including in modified + form), You must: + + a. retain the following if it is supplied by the Licensor + with the Licensed Material: + + i. identification of the creator(s) of the Licensed + Material and any others designated to receive + attribution, in any reasonable manner requested by + the Licensor (including by pseudonym if + designated); + + ii. a copyright notice; + + iii. a notice that refers to this Public License; + + iv. a notice that refers to the disclaimer of + warranties; + + v. a URI or hyperlink to the Licensed Material to the + extent reasonably practicable; + + b. indicate if You modified the Licensed Material and + retain an indication of any previous modifications; and + + c. indicate the Licensed Material is licensed under this + Public License, and include the text of, or the URI or + hyperlink to, this Public License. + + 2. You may satisfy the conditions in Section 3(a)(1) in any + reasonable manner based on the medium, means, and context in + which You Share the Licensed Material. For example, it may be + reasonable to satisfy the conditions by providing a URI or + hyperlink to a resource that includes the required + information. + + 3. 
If requested by the Licensor, You must remove any of the + information required by Section 3(a)(1)(A) to the extent + reasonably practicable. + + 4. If You Share Adapted Material You produce, the Adapter's + License You apply must not prevent recipients of the Adapted + Material from complying with this Public License. + + +Section 4 -- Sui Generis Database Rights. + +Where the Licensed Rights include Sui Generis Database Rights that +apply to Your use of the Licensed Material: + + a. for the avoidance of doubt, Section 2(a)(1) grants You the right + to extract, reuse, reproduce, and Share all or a substantial + portion of the contents of the database; + + b. if You include all or a substantial portion of the database + contents in a database in which You have Sui Generis Database + Rights, then the database in which You have Sui Generis Database + Rights (but not its individual contents) is Adapted Material; and + + c. You must comply with the conditions in Section 3(a) if You Share + all or a substantial portion of the contents of the database. + +For the avoidance of doubt, this Section 4 supplements and does not +replace Your obligations under this Public License where the Licensed +Rights include other Copyright and Similar Rights. + + +Section 5 -- Disclaimer of Warranties and Limitation of Liability. + + a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE + EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS + AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF + ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, + IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, + WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, + ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT + KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT + ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. + + b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE + TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, + NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, + INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, + COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR + USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN + ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR + DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR + IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. + + c. The disclaimer of warranties and limitation of liability provided + above shall be interpreted in a manner that, to the extent + possible, most closely approximates an absolute disclaimer and + waiver of all liability. + + +Section 6 -- Term and Termination. + + a. This Public License applies for the term of the Copyright and + Similar Rights licensed here. However, if You fail to comply with + this Public License, then Your rights under this Public License + terminate automatically. + + b. Where Your right to use the Licensed Material has terminated under + Section 6(a), it reinstates: + + 1. automatically as of the date the violation is cured, provided + it is cured within 30 days of Your discovery of the + violation; or + + 2. upon express reinstatement by the Licensor. + + For the avoidance of doubt, this Section 6(b) does not affect any + right the Licensor may have to seek remedies for Your violations + of this Public License. + + c. 
For the avoidance of doubt, the Licensor may also offer the + Licensed Material under separate terms or conditions or stop + distributing the Licensed Material at any time; however, doing so + will not terminate this Public License. + + d. Sections 1, 5, 6, 7, and 8 survive termination of this Public + License. + + +Section 7 -- Other Terms and Conditions. + + a. The Licensor shall not be bound by any additional or different + terms or conditions communicated by You unless expressly agreed. + + b. Any arrangements, understandings, or agreements regarding the + Licensed Material not stated herein are separate from and + independent of the terms and conditions of this Public License. + + +Section 8 -- Interpretation. + + a. For the avoidance of doubt, this Public License does not, and + shall not be interpreted to, reduce, limit, restrict, or impose + conditions on any use of the Licensed Material that could lawfully + be made without permission under this Public License. + + b. To the extent possible, if any provision of this Public License is + deemed unenforceable, it shall be automatically reformed to the + minimum extent necessary to make it enforceable. If the provision + cannot be reformed, it shall be severed from this Public License + without affecting the enforceability of the remaining terms and + conditions. + + c. No term or condition of this Public License will be waived and no + failure to comply consented to unless expressly agreed to by the + Licensor. + + d. Nothing in this Public License constitutes or may be interpreted + as a limitation upon, or waiver of, any privileges and immunities + that apply to the Licensor or You, including from the legal + processes of any jurisdiction or authority. + + +======================================================================= + +Creative Commons is not a party to its public +licenses. Notwithstanding, Creative Commons may elect to apply one of +its public licenses to material it publishes and in those instances +will be considered the “Licensor.” The text of the Creative Commons +public licenses is dedicated to the public domain under the CC0 Public +Domain Dedication. Except for the limited purpose of indicating that +material is shared under a Creative Commons public license or as +otherwise permitted by the Creative Commons policies published at +creativecommons.org/policies, Creative Commons does not authorize the +use of the trademark "Creative Commons" or any other trademark or logo +of Creative Commons without its prior written consent including, +without limitation, in connection with any unauthorized modifications +to any of its public licenses or any other arrangements, +understandings, or agreements concerning use of licensed material. For +the avoidance of doubt, this paragraph does not form part of the +public licenses. + +Creative Commons may be contacted at creativecommons.org. + +## Samples and Code + +Code samples in this repository are provided with the following license: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/knative/docs/docs/serving/samples/grpc-ping-go/proto/ping.pb.go b/vendor/github.com/knative/docs/docs/serving/samples/grpc-ping-go/proto/ping.pb.go new file mode 100644 index 0000000000..0543a6279c --- /dev/null +++ b/vendor/github.com/knative/docs/docs/serving/samples/grpc-ping-go/proto/ping.pb.go @@ -0,0 +1,213 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: ping.proto + +// +build grpcping + +package ping + +import ( + fmt "fmt" + + proto "github.com/golang/protobuf/proto" + + math "math" + + context "golang.org/x/net/context" + + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Request struct { + Msg string `protobuf:"bytes,1,opt,name=msg" json:"msg,omitempty"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Request) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +type Response struct { + Msg string `protobuf:"bytes,1,opt,name=msg" json:"msg,omitempty"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} +func (*Response) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *Response) GetMsg() string { + if m != nil { + return m.Msg + } + return "" +} + +func init() { + proto.RegisterType((*Request)(nil), "ping.Request") + proto.RegisterType((*Response)(nil), "ping.Response") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for PingService service + +type PingServiceClient interface { + Ping(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) + PingStream(ctx context.Context, opts ...grpc.CallOption) (PingService_PingStreamClient, error) +} + +type pingServiceClient struct { + cc *grpc.ClientConn +} + +func NewPingServiceClient(cc *grpc.ClientConn) PingServiceClient { + return &pingServiceClient{cc} +} + +func (c *pingServiceClient) Ping(ctx context.Context, in *Request, opts ...grpc.CallOption) (*Response, error) { + out := new(Response) + err := grpc.Invoke(ctx, "/ping.PingService/Ping", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *pingServiceClient) PingStream(ctx context.Context, opts ...grpc.CallOption) (PingService_PingStreamClient, error) { + stream, err := grpc.NewClientStream(ctx, &_PingService_serviceDesc.Streams[0], c.cc, "/ping.PingService/PingStream", opts...) 
+ if err != nil { + return nil, err + } + x := &pingServicePingStreamClient{stream} + return x, nil +} + +type PingService_PingStreamClient interface { + Send(*Request) error + Recv() (*Response, error) + grpc.ClientStream +} + +type pingServicePingStreamClient struct { + grpc.ClientStream +} + +func (x *pingServicePingStreamClient) Send(m *Request) error { + return x.ClientStream.SendMsg(m) +} + +func (x *pingServicePingStreamClient) Recv() (*Response, error) { + m := new(Response) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for PingService service + +type PingServiceServer interface { + Ping(context.Context, *Request) (*Response, error) + PingStream(PingService_PingStreamServer) error +} + +func RegisterPingServiceServer(s *grpc.Server, srv PingServiceServer) { + s.RegisterService(&_PingService_serviceDesc, srv) +} + +func _PingService_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Request) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(PingServiceServer).Ping(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/ping.PingService/Ping", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(PingServiceServer).Ping(ctx, req.(*Request)) + } + return interceptor(ctx, in, info, handler) +} + +func _PingService_PingStream_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(PingServiceServer).PingStream(&pingServicePingStreamServer{stream}) +} + +type PingService_PingStreamServer interface { + Send(*Response) error + Recv() (*Request, error) + grpc.ServerStream +} + +type pingServicePingStreamServer struct { + grpc.ServerStream +} + +func (x *pingServicePingStreamServer) Send(m *Response) error { + return x.ServerStream.SendMsg(m) +} + +func (x *pingServicePingStreamServer) Recv() (*Request, error) { + m := new(Request) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _PingService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "ping.PingService", + HandlerType: (*PingServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Ping", + Handler: _PingService_Ping_Handler, + }, + }, + Streams: []grpc.StreamDesc{ + { + StreamName: "PingStream", + Handler: _PingService_PingStream_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "ping.proto", +} + +func init() { proto.RegisterFile("ping.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 139 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xc8, 0xcc, 0x4b, + 0xd7, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x01, 0xb1, 0x95, 0xa4, 0xb9, 0xd8, 0x83, 0x52, + 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84, 0x04, 0xb8, 0x98, 0x73, 0x8b, 0xd3, 0x25, 0x18, 0x15, 0x18, + 0x35, 0x38, 0x83, 0x40, 0x4c, 0x25, 0x19, 0x2e, 0x8e, 0xa0, 0xd4, 0xe2, 0x82, 0xfc, 0xbc, 0xe2, + 0x54, 0x4c, 0x59, 0xa3, 0x4c, 0x2e, 0xee, 0x80, 0xcc, 0xbc, 0xf4, 0xe0, 0xd4, 0xa2, 0xb2, 0xcc, + 0xe4, 0x54, 0x21, 0x75, 0x2e, 0x16, 0x10, 0x57, 0x88, 0x57, 0x0f, 0x6c, 0x09, 0xd4, 0x54, 0x29, + 0x3e, 0x18, 0x17, 0x62, 0x8e, 0x12, 0x83, 0x90, 0x21, 0x17, 0x17, 0x58, 0x5f, 0x49, 0x51, 0x6a, + 0x62, 0x2e, 0x41, 0xe5, 0x1a, 0x8c, 0x06, 0x8c, 0x49, 0x6c, 0x60, 0x27, 0x1b, 0x03, 0x02, 0x00, + 0x00, 0xff, 0xff, 0x85, 0x87, 0x57, 0xf8, 
0xc0, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/knative/docs/test/flag.go b/vendor/github.com/knative/docs/test/flag.go new file mode 100644 index 0000000000..d07ce2ed22 --- /dev/null +++ b/vendor/github.com/knative/docs/test/flag.go @@ -0,0 +1,75 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package test + +import ( + "flag" + "fmt" + "os" + "strings" +) + +// Flags holds the command line flags or defaults for settings in the user's environment. +// See EnvironmentFlags for a list of supported fields. +var Flags = initializeFlags() + +// EnvironmentFlags define the flags that are needed to run the e2e tests. +type EnvironmentFlags struct { + Cluster string // K8s cluster (defaults to cluster in kubeconfig) + LogVerbose bool // Enable verbose logging + DockerRepo string // Docker repo (defaults to $KO_DOCKER_REPO) + EmitMetrics bool // Emit metrics + Tag string // Docker image tag + Languages string // Whitelisted languages to run +} + +func initializeFlags() *EnvironmentFlags { + var f EnvironmentFlags + flag.StringVar(&f.Cluster, "cluster", "", + "Provide the cluster to test against. Defaults to the current cluster in kubeconfig.") + + flag.BoolVar(&f.LogVerbose, "logverbose", false, + "Set this flag to true if you would like to see verbose logging.") + + flag.BoolVar(&f.EmitMetrics, "emitmetrics", false, + "Set this flag to true if you would like tests to emit metrics, e.g. latency of resources being realized in the system.") + + flag.StringVar(&f.DockerRepo, "dockerrepo", os.Getenv("KO_DOCKER_REPO"), + "Provide the uri of the docker repo you have uploaded the test image to using `uploadtestimage.sh`. Defaults to $KO_DOCKER_REPO") + + flag.StringVar(&f.Tag, "tag", "latest", "Provide the version tag for the test images.") + + flag.StringVar(&f.Languages, "languages", "", "Comma separated languages to run e2e test on.") + + return &f +} + +// ImagePath is a helper function to prefix image name with repo and suffix with tag +func ImagePath(name string) string { + return fmt.Sprintf("%s/%s:%s", Flags.DockerRepo, name, Flags.Tag) +} + +// GetWhitelistedLanguages is a helper function to return a map of whitelisted languages based on Languages filter +func GetWhitelistedLanguages() map[string]bool { + whitelist := make(map[string]bool) + if "" != Flags.Languages { + for _, l := range strings.Split(Flags.Languages, ",") { + whitelist[l] = true + } + } + return whitelist +} diff --git a/vendor/github.com/knative/docs/test/sampleapp/config.go b/vendor/github.com/knative/docs/test/sampleapp/config.go new file mode 100644 index 0000000000..52b2d44a2c --- /dev/null +++ b/vendor/github.com/knative/docs/test/sampleapp/config.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 The Knative Authors + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
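The generated ping.pb.go bindings above (guarded by the grpcping build tag) define a PingService with a unary Ping RPC and a bidirectional PingStream. A minimal client sketch, assuming a server is reachable at localhost:8080 and the binary is built with -tags grpcping, might look like the following; the import path simply mirrors the vendored location and is otherwise an assumption:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/context" // same context package the generated code imports
	"google.golang.org/grpc"

	ping "github.com/knative/docs/docs/serving/samples/grpc-ping-go/proto"
)

func main() {
	// Dial the sample service; the address is a placeholder.
	conn, err := grpc.Dial("localhost:8080", grpc.WithInsecure())
	if err != nil {
		log.Fatalf("dial failed: %v", err)
	}
	defer conn.Close()

	// Call the unary Ping RPC defined in the generated bindings.
	client := ping.NewPingServiceClient(conn)
	resp, err := client.Ping(context.Background(), &ping.Request{Msg: "hello"})
	if err != nil {
		log.Fatalf("ping failed: %v", err)
	}
	fmt.Println("server said:", resp.Msg)
}
```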
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package sampleapp + +import ( + "fmt" + "io/ioutil" + "os/exec" + "strings" + "testing" + + yaml "gopkg.in/yaml.v2" +) + +const ( + // using these defaults if not provided, see useDefaultIfNotProvided function below + defaultSrcDir = "../../docs/serving/samples/hello-world/helloworld-%s" + defaultWorkDir = "helloworld-%s_tmp" + defaultAppName = "helloworld-%s" + defaultYamlImagePlaceHolder = "docker.io/{username}/helloworld-%s" + + // ActionMsg serves as documentation purpose, which will be referenced for + // clearly displaying error messages. + ActionMsg = "All files required for running sample apps are checked " + + "against README.md, the content of source files should be identical with what's " + + "in README.md file, the list of the files to be verified is the same set of files " + + "used for running sample apps, they are configured in `/test/sampleapp/config.yaml`. " + + "If an exception is needed the file can be configured to be copied as a separate step " + + "in `PreCommand` such as: " + + "https://github.com/knative/docs/blob/65f7b402fee7f94dfbd9e4512ef3beed7b85de66/test/sampleapp/config.yaml#L4" +) + +// AllConfigs contains all LanguageConfig +type AllConfigs struct { + Languages []LanguageConfig `yaml:"languages` +} + +// LanguageConfig contains all information for building/deploying an app +type LanguageConfig struct { + Language string `yaml:"language"` + ExpectedOutput string `yaml:"expectedOutput"` + SrcDir string `yaml:"srcDir"` // Directory contains sample code + WorkDir string `yaml:"workDir"` // Temp work directory + AppName string `yaml:"appName"` + YamlImagePlaceholder string `yaml:"yamlImagePlaceholder"` // Token to be replaced by real docker image URI + PreCommands []Command `yaml:"preCommands"` // Commands to be ran before copying + Copies []string `yaml:"copies"` // Files to be copied from SrcDir to WorkDir + PostCommands []Command `yaml:"postCommands"` // Commands to be ran after copying +} + +// Command contains shell commands +type Command struct { + Exec string `yaml:"exec"` + Args string `yaml:"args"` +} + +// UseDefaultIfNotProvided sets default value to SrcDir, WorkDir, AppName, and YamlImagePlaceholder if not provided +func (lc *LanguageConfig) UseDefaultIfNotProvided() { + if "" == lc.SrcDir { + lc.SrcDir = fmt.Sprintf(defaultSrcDir, lc.Language) + } + if "" == lc.WorkDir { + lc.WorkDir = fmt.Sprintf(defaultWorkDir, lc.Language) + } + if "" == lc.AppName { + lc.AppName = fmt.Sprintf(defaultAppName, lc.Language) + } + if "" == lc.YamlImagePlaceholder { + lc.YamlImagePlaceholder = fmt.Sprintf(defaultYamlImagePlaceHolder, lc.Language) + } +} + +// Run runs command and fail if it failed +func (c *Command) Run(t *testing.T) { + args := strings.Split(c.Args, " ") + if output, err := exec.Command(c.Exec, args...).CombinedOutput(); err != nil { + t.Fatalf("Error executing: '%s' '%s' -err: '%v'", c.Exec, c.Args, strings.TrimSpace(string(output))) + } +} + +// GetConfigs parses a config yaml file and return AllConfigs struct +func GetConfigs(configPath string) (AllConfigs, error) { + var lcs AllConfigs + content, err := ioutil.ReadFile(configPath) + if 
nil == err { + err = yaml.Unmarshal(content, &lcs) + } + return lcs, err +} diff --git a/vendor/github.com/lightstep/tracecontext.go/LICENSE b/vendor/github.com/lightstep/tracecontext.go/LICENSE new file mode 100644 index 0000000000..853b46db12 --- /dev/null +++ b/vendor/github.com/lightstep/tracecontext.go/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lightstep/tracecontext.go/traceparent/package.go b/vendor/github.com/lightstep/tracecontext.go/traceparent/package.go new file mode 100644 index 0000000000..251f084203 --- /dev/null +++ b/vendor/github.com/lightstep/tracecontext.go/traceparent/package.go @@ -0,0 +1,192 @@ +package traceparent + +import ( + "bytes" + "encoding/hex" + "errors" + "fmt" + "regexp" +) + +const ( + // Version represents the maximum `traceparent` header version that is fully supported. + // The library attempts optimistic forwards compatibility with higher versions. + Version = 0 +) + +var ( + // ErrInvalidFormat occurs when the format is invalid, such as if there are missing characters + // or a field contains an unexpected character set. + ErrInvalidFormat = errors.New("tracecontext: Invalid traceparent format") + // ErrInvalidVersion occurs when the encoded version is invalid, i.e., is 255. + ErrInvalidVersion = errors.New("tracecontext: Invalid traceparent version") + // ErrInvalidTraceID occurs when the encoded trace ID is invalid, i.e., all bytes are 0 + ErrInvalidTraceID = errors.New("tracecontext: Invalid traceparent trace ID") + // ErrInvalidSpanID occurs when the encoded span ID is invalid, i.e., all bytes are 0 + ErrInvalidSpanID = errors.New("tracecontext: Invalid traceparent span ID") +) + +const ( + maxVersion = 254 + + numVersionBytes = 1 + numTraceIDBytes = 16 + numSpanIDBytes = 8 + numFlagBytes = 1 +) + +var ( + re = regexp.MustCompile(`^([a-f0-9]{2})-([a-f0-9]{32})-([a-f0-9]{16})-([a-f0-9]{2})(-.*)?$`) + + invalidTraceIDAllZeroes = make([]byte, numTraceIDBytes, numTraceIDBytes) + invalidSpanIDAllZeroes = make([]byte, numSpanIDBytes, numSpanIDBytes) +) + +// Flags contain recommendations from the caller relevant to the whole trace, e.g., for sampling. +type Flags struct { + // Recorded indicates that at least one span in the trace may have been recorded. + // Tracing systems are advised to record all new spans in recorded traces, as incomplete traces may lead to + // a degraded tracing experience. 
+ Recorded bool +} + +// String encodes the Flags in an 8-bit field. +func (f Flags) String() string { + var flags [1]byte + if f.Recorded { + flags[0] = 1 + } + return fmt.Sprintf("%02x", flags) +} + +// TraceParent indicates information about a span and the trace of which it is part, +// so that child spans started in the same trace may propagate necessary data and share relevant behaviour. +type TraceParent struct { + // Version represents the version used to encode the `TraceParent`. + // Typically, this is the minimum of this library's supported version and the version of the header from which the `TraceParent` was decoded. + Version uint8 + // TraceID is the trace ID of the whole trace, and should be constant across all spans in a given trace. + // A `TraceID` that contains only 0 bytes should be treated as invalid. + TraceID [16]byte + // SpanID is the span ID of the span from which the `TraceParent` was derived, i.e., the parent of the next span that will be started. + // Span IDs should be unique within a given trace. + // A `TraceID` that contains only 0 bytes should be treated as invalid. + SpanID [8]byte + // Flags indicate behaviour that is recommended when handling new spans. + Flags Flags +} + +// String encodes the `TraceParent` into a string formatted according to the W3C spec. +// The string may be invalid if any fields are invalid, e.g., if the `TraceID` contains only 0 bytes. +func (tp TraceParent) String() string { + return fmt.Sprintf("%02x-%032x-%016x-%s", tp.Version, tp.TraceID, tp.SpanID, tp.Flags) +} + +// Parse attempts to decode a `TraceParent` from a byte array. +// It returns an error if the byte array is incorrectly formatted or otherwise invalid. +func Parse(b []byte) (TraceParent, error) { + return parse(b) +} + +// ParseString attempts to decode a `TraceParent` from a string. +// It returns an error if the string is incorrectly formatted or otherwise invalid. 
+func ParseString(s string) (TraceParent, error) { + return parse([]byte(s)) +} + +func parse(b []byte) (tp TraceParent, err error) { + matches := re.FindSubmatch(b) + if len(matches) < 6 { + err = ErrInvalidFormat + return + } + + var version uint8 + if version, err = parseVersion(matches[1]); err != nil { + return + } + if version == Version && len(matches[5]) > 0 { + err = ErrInvalidFormat + return + } + + var traceID [16]byte + if traceID, err = parseTraceID(matches[2]); err != nil { + return + } + + var spanID [8]byte + if spanID, err = parseSpanID(matches[3]); err != nil { + return + } + + var flags Flags + if flags, err = parseFlags(matches[4]); err != nil { + return + } + + tp.Version = Version + tp.TraceID = traceID + tp.SpanID = spanID + tp.Flags = flags + + return tp, nil +} + +func parseVersion(b []byte) (uint8, error) { + version, ok := parseEncodedSegment(b, numVersionBytes) + if !ok { + return 0, ErrInvalidFormat + } + if version[0] > maxVersion { + return 0, ErrInvalidVersion + } + return version[0], nil +} + +func parseTraceID(b []byte) (traceID [16]byte, err error) { + id, ok := parseEncodedSegment(b, numTraceIDBytes) + if !ok { + return traceID, ErrInvalidFormat + } + if bytes.Equal(id, invalidTraceIDAllZeroes) { + return traceID, ErrInvalidTraceID + } + + copy(traceID[:], id) + + return traceID, nil +} + +func parseSpanID(b []byte) (spanID [8]byte, err error) { + id, ok := parseEncodedSegment(b, numSpanIDBytes) + if !ok { + return spanID, ErrInvalidFormat + } + if bytes.Equal(id, invalidSpanIDAllZeroes) { + return spanID, ErrInvalidSpanID + } + + copy(spanID[:], id) + + return spanID, nil +} + +func parseFlags(b []byte) (Flags, error) { + flags, ok := parseEncodedSegment(b, numFlagBytes) + if !ok { + return Flags{}, ErrInvalidFormat + } + + return Flags{ + Recorded: (flags[0] & 1) == 1, + }, nil +} + +func parseEncodedSegment(src []byte, expectedLen int) ([]byte, bool) { + dst := make([]byte, hex.DecodedLen(len(src))) + if n, err := hex.Decode(dst, src); n != expectedLen || err != nil { + return dst, false + } + return dst, true +} diff --git a/vendor/github.com/lightstep/tracecontext.go/tracestate/package.go b/vendor/github.com/lightstep/tracecontext.go/tracestate/package.go new file mode 100644 index 0000000000..563ad1f8a1 --- /dev/null +++ b/vendor/github.com/lightstep/tracecontext.go/tracestate/package.go @@ -0,0 +1,123 @@ +package tracestate + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +var ( + // ErrInvalidListMember occurs if at least one list member is invalid, e.g., contains an unexpected character. + ErrInvalidListMember = errors.New("tracecontext: Invalid tracestate list member") + // ErrDuplicateListMemberKey occurs if at least two list members contain the same vendor-tenant pair. + ErrDuplicateListMemberKey = errors.New("tracecontext: Duplicate list member key in tracestate") + // ErrTooManyListMembers occurs if the list contains more than the maximum number of members per the spec, i.e., 32. + ErrTooManyListMembers = errors.New("tracecontext: Too many list members in tracestate") +) + +const ( + maxMembers = 32 + + delimiter = "," +) + +var ( + re = regexp.MustCompile(`^\s*(?:([a-z0-9_\-*/]{1,241})@([a-z0-9_\-*/]{1,14})|([a-z0-9_\-*/]{1,256}))=([\x20-\x2b\x2d-\x3c\x3e-\x7e]*[\x21-\x2b\x2d-\x3c\x3e-\x7e])\s*$`) +) + +// Member contains vendor-specific data that should be propagated across all new spans started within a given trace. +type Member struct { + // Vendor is a key representing a particular trace vendor. 
+ Vendor string + // Tenant is a key used to distinguish between tenants of a multi-tenant trace vendor. + Tenant string + // Value is the particular data that the vendor intents to pass to child spans. + Value string +} + +// String encodes a `Member` into a string formatted according to the W3C spec. +// The string may be invalid if any fields are invalid, e.g, the vendor contains a non-compliant character. +func (m Member) String() string { + if m.Tenant == "" { + return fmt.Sprintf("%s=%s", m.Vendor, m.Value) + } + return fmt.Sprintf("%s@%s=%s", m.Vendor, m.Tenant, m.Value) +} + +// TraceState represents a list of `Member`s that should be propagated to new spans started in a trace. +type TraceState []Member + +// String encodes all `Member`s of the `TraceState` into a single string, formatted according to the W3C spec. +// The string may be invalid if any `Member`s are invalid, e.g., containing a non-compliant character. +func (ts TraceState) String() string { + var members []string + for _, member := range ts { + members = append(members, member.String()) + } + return strings.Join(members, ",") +} + +// Parse attempts to decode a `TraceState` from a byte array. +// It returns an error if the byte array is invalid, e.g., it contains an incorrectly formatted list member. +func Parse(traceState []byte) (TraceState, error) { + return parse(string(traceState)) +} + +// ParseString attempts to decode a `TraceState` from a string. +// It returns an error if the string is invalid, e.g., it contains an incorrectly formatted list member. +func ParseString(traceState string) (TraceState, error) { + return parse(traceState) +} + +func parse(traceState string) (ts TraceState, err error) { + found := make(map[string]interface{}) + + members := strings.Split(traceState, delimiter) + + for _, member := range members { + if len(member) == 0 { + continue + } + + var m Member + m, err = parseMember(member) + if err != nil { + return + } + + key := fmt.Sprintf("%s%s", m.Vendor, m.Tenant) + if _, ok := found[key]; ok { + err = ErrDuplicateListMemberKey + return + } + found[key] = nil + + ts = append(ts, m) + + if len(ts) > maxMembers { + err = ErrTooManyListMembers + return + } + } + + return +} + +func parseMember(s string) (Member, error) { + matches := re.FindStringSubmatch(s) + if len(matches) != 5 { + return Member{}, ErrInvalidListMember + } + + vendor := matches[1] + if vendor == "" { + vendor = matches[3] + } + + return Member{ + Vendor: vendor, + Tenant: matches[2], + Value: matches[4], + }, nil +} diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE new file mode 100644 index 0000000000..835ba3e755 --- /dev/null +++ b/vendor/github.com/pkg/errors/LICENSE @@ -0,0 +1,23 @@ +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go new file mode 100644 index 0000000000..161aea2582 --- /dev/null +++ b/vendor/github.com/pkg/errors/errors.go @@ -0,0 +1,288 @@ +// Package errors provides simple error handling primitives. +// +// The traditional error handling idiom in Go is roughly akin to +// +// if err != nil { +// return err +// } +// +// which when applied recursively up the call stack results in error reports +// without context or debugging information. The errors package allows +// programmers to add context to the failure path in their code in a way +// that does not destroy the original value of the error. +// +// Adding context to an error +// +// The errors.Wrap function returns a new error that adds context to the +// original error by recording a stack trace at the point Wrap is called, +// together with the supplied message. For example +// +// _, err := ioutil.ReadAll(r) +// if err != nil { +// return errors.Wrap(err, "read failed") +// } +// +// If additional control is required, the errors.WithStack and +// errors.WithMessage functions destructure errors.Wrap into its component +// operations: annotating an error with a stack trace and with a message, +// respectively. +// +// Retrieving the cause of an error +// +// Using errors.Wrap constructs a stack of errors, adding context to the +// preceding error. Depending on the nature of the error it may be necessary +// to reverse the operation of errors.Wrap to retrieve the original error +// for inspection. Any error value which implements this interface +// +// type causer interface { +// Cause() error +// } +// +// can be inspected by errors.Cause. errors.Cause will recursively retrieve +// the topmost error that does not implement causer, which is assumed to be +// the original cause. For example: +// +// switch err := errors.Cause(err).(type) { +// case *MyError: +// // handle specifically +// default: +// // unknown error +// } +// +// Although the causer interface is not exported by this package, it is +// considered a part of its stable public interface. +// +// Formatted printing of errors +// +// All error values returned from this package implement fmt.Formatter and can +// be formatted by the fmt package. The following verbs are supported: +// +// %s print the error. If the error has a Cause it will be +// printed recursively. +// %v see %s +// %+v extended format. Each Frame of the error's StackTrace will +// be printed in detail. +// +// Retrieving the stack trace of an error or wrapper +// +// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are +// invoked. 
This information can be retrieved with the following interface: +// +// type stackTracer interface { +// StackTrace() errors.StackTrace +// } +// +// The returned errors.StackTrace type is defined as +// +// type StackTrace []Frame +// +// The Frame type represents a call site in the stack trace. Frame supports +// the fmt.Formatter interface that can be used for printing information about +// the stack trace of this error. For example: +// +// if err, ok := err.(stackTracer); ok { +// for _, f := range err.StackTrace() { +// fmt.Printf("%+s:%d\n", f, f) +// } +// } +// +// Although the stackTracer interface is not exported by this package, it is +// considered a part of its stable public interface. +// +// See the documentation for Frame.Format for more details. +package errors + +import ( + "fmt" + "io" +) + +// New returns an error with the supplied message. +// New also records the stack trace at the point it was called. +func New(message string) error { + return &fundamental{ + msg: message, + stack: callers(), + } +} + +// Errorf formats according to a format specifier and returns the string +// as a value that satisfies error. +// Errorf also records the stack trace at the point it was called. +func Errorf(format string, args ...interface{}) error { + return &fundamental{ + msg: fmt.Sprintf(format, args...), + stack: callers(), + } +} + +// fundamental is an error that has a message and a stack, but no caller. +type fundamental struct { + msg string + *stack +} + +func (f *fundamental) Error() string { return f.msg } + +func (f *fundamental) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + io.WriteString(s, f.msg) + f.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, f.msg) + case 'q': + fmt.Fprintf(s, "%q", f.msg) + } +} + +// WithStack annotates err with a stack trace at the point WithStack was called. +// If err is nil, WithStack returns nil. +func WithStack(err error) error { + if err == nil { + return nil + } + return &withStack{ + err, + callers(), + } +} + +type withStack struct { + error + *stack +} + +func (w *withStack) Cause() error { return w.error } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withStack) Unwrap() error { return w.error } + +func (w *withStack) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v", w.Cause()) + w.stack.Format(s, verb) + return + } + fallthrough + case 's': + io.WriteString(s, w.Error()) + case 'q': + fmt.Fprintf(s, "%q", w.Error()) + } +} + +// Wrap returns an error annotating err with a stack trace +// at the point Wrap is called, and the supplied message. +// If err is nil, Wrap returns nil. +func Wrap(err error, message string) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: message, + } + return &withStack{ + err, + callers(), + } +} + +// Wrapf returns an error annotating err with a stack trace +// at the point Wrapf is called, and the format specifier. +// If err is nil, Wrapf returns nil. +func Wrapf(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + err = &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } + return &withStack{ + err, + callers(), + } +} + +// WithMessage annotates err with a new message. +// If err is nil, WithMessage returns nil. 
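+// For example (an illustrative sketch; validateConfig and cfg are hypothetical
+// names, not defined in this repository):
+//
+//	if err := validateConfig(cfg); err != nil {
+//		// Unlike Wrap, WithMessage does not record another stack trace.
+//		return errors.WithMessage(err, "validating configuration")
+//	}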
+func WithMessage(err error, message string) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: message, + } +} + +// WithMessagef annotates err with the format specifier. +// If err is nil, WithMessagef returns nil. +func WithMessagef(err error, format string, args ...interface{}) error { + if err == nil { + return nil + } + return &withMessage{ + cause: err, + msg: fmt.Sprintf(format, args...), + } +} + +type withMessage struct { + cause error + msg string +} + +func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } +func (w *withMessage) Cause() error { return w.cause } + +// Unwrap provides compatibility for Go 1.13 error chains. +func (w *withMessage) Unwrap() error { return w.cause } + +func (w *withMessage) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + if s.Flag('+') { + fmt.Fprintf(s, "%+v\n", w.Cause()) + io.WriteString(s, w.msg) + return + } + fallthrough + case 's', 'q': + io.WriteString(s, w.Error()) + } +} + +// Cause returns the underlying cause of the error, if possible. +// An error value has a cause if it implements the following +// interface: +// +// type causer interface { +// Cause() error +// } +// +// If the error does not implement Cause, the original error will +// be returned. If the error is nil, nil will be returned without further +// investigation. +func Cause(err error) error { + type causer interface { + Cause() error + } + + for err != nil { + cause, ok := err.(causer) + if !ok { + break + } + err = cause.Cause() + } + return err +} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go new file mode 100644 index 0000000000..be0d10d0c7 --- /dev/null +++ b/vendor/github.com/pkg/errors/go113.go @@ -0,0 +1,38 @@ +// +build go1.13 + +package errors + +import ( + stderrors "errors" +) + +// Is reports whether any error in err's chain matches target. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error is considered to match a target if it is equal to that target or if +// it implements a method Is(error) bool such that Is(target) returns true. +func Is(err, target error) bool { return stderrors.Is(err, target) } + +// As finds the first error in err's chain that matches target, and if so, sets +// target to that error value and returns true. +// +// The chain consists of err itself followed by the sequence of errors obtained by +// repeatedly calling Unwrap. +// +// An error matches target if the error's concrete value is assignable to the value +// pointed to by target, or if the error has a method As(interface{}) bool such that +// As(target) returns true. In the latter case, the As method is responsible for +// setting target. +// +// As will panic if target is not a non-nil pointer to either a type that implements +// error, or to any interface type. As returns false if err is nil. +func As(err error, target interface{}) bool { return stderrors.As(err, target) } + +// Unwrap returns the result of calling the Unwrap method on err, if err's +// type contains an Unwrap method returning error. +// Otherwise, Unwrap returns nil. 
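+// For example (an illustrative sketch): because the wrapper types in this package
+// implement Unwrap, errors produced by Wrap and WithMessage cooperate with the
+// standard library's matching helpers on Go 1.13+:
+//
+//	base := errors.New("record not found")
+//	wrapped := errors.Wrap(base, "loading user")
+//	_ = errors.Is(wrapped, base) // true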
+func Unwrap(err error) error { + return stderrors.Unwrap(err) +} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go new file mode 100644 index 0000000000..779a8348fb --- /dev/null +++ b/vendor/github.com/pkg/errors/stack.go @@ -0,0 +1,177 @@ +package errors + +import ( + "fmt" + "io" + "path" + "runtime" + "strconv" + "strings" +) + +// Frame represents a program counter inside a stack frame. +// For historical reasons if Frame is interpreted as a uintptr +// its value represents the program counter + 1. +type Frame uintptr + +// pc returns the program counter for this frame; +// multiple frames may have the same PC value. +func (f Frame) pc() uintptr { return uintptr(f) - 1 } + +// file returns the full path to the file that contains the +// function for this Frame's pc. +func (f Frame) file() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + file, _ := fn.FileLine(f.pc()) + return file +} + +// line returns the line number of source code of the +// function for this Frame's pc. +func (f Frame) line() int { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return 0 + } + _, line := fn.FileLine(f.pc()) + return line +} + +// name returns the name of this function, if known. +func (f Frame) name() string { + fn := runtime.FuncForPC(f.pc()) + if fn == nil { + return "unknown" + } + return fn.Name() +} + +// Format formats the frame according to the fmt.Formatter interface. +// +// %s source file +// %d source line +// %n function name +// %v equivalent to %s:%d +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+s function name and path of source file relative to the compile time +// GOPATH separated by \n\t (\n\t) +// %+v equivalent to %+s:%d +func (f Frame) Format(s fmt.State, verb rune) { + switch verb { + case 's': + switch { + case s.Flag('+'): + io.WriteString(s, f.name()) + io.WriteString(s, "\n\t") + io.WriteString(s, f.file()) + default: + io.WriteString(s, path.Base(f.file())) + } + case 'd': + io.WriteString(s, strconv.Itoa(f.line())) + case 'n': + io.WriteString(s, funcname(f.name())) + case 'v': + f.Format(s, 's') + io.WriteString(s, ":") + f.Format(s, 'd') + } +} + +// MarshalText formats a stacktrace Frame as a text string. The output is the +// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. +func (f Frame) MarshalText() ([]byte, error) { + name := f.name() + if name == "unknown" { + return []byte(name), nil + } + return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil +} + +// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). +type StackTrace []Frame + +// Format formats the stack of Frames according to the fmt.Formatter interface. +// +// %s lists source files for each Frame in the stack +// %v lists the source file and line number for each Frame in the stack +// +// Format accepts flags that alter the printing of some verbs, as follows: +// +// %+v Prints filename, function, and line number for each Frame in the stack. 
+func (st StackTrace) Format(s fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case s.Flag('+'): + for _, f := range st { + io.WriteString(s, "\n") + f.Format(s, verb) + } + case s.Flag('#'): + fmt.Fprintf(s, "%#v", []Frame(st)) + default: + st.formatSlice(s, verb) + } + case 's': + st.formatSlice(s, verb) + } +} + +// formatSlice will format this StackTrace into the given buffer as a slice of +// Frame, only valid when called with '%s' or '%v'. +func (st StackTrace) formatSlice(s fmt.State, verb rune) { + io.WriteString(s, "[") + for i, f := range st { + if i > 0 { + io.WriteString(s, " ") + } + f.Format(s, verb) + } + io.WriteString(s, "]") +} + +// stack represents a stack of program counters. +type stack []uintptr + +func (s *stack) Format(st fmt.State, verb rune) { + switch verb { + case 'v': + switch { + case st.Flag('+'): + for _, pc := range *s { + f := Frame(pc) + fmt.Fprintf(st, "\n%+v", f) + } + } + } +} + +func (s *stack) StackTrace() StackTrace { + f := make([]Frame, len(*s)) + for i := 0; i < len(f); i++ { + f[i] = Frame((*s)[i]) + } + return f +} + +func callers() *stack { + const depth = 32 + var pcs [depth]uintptr + n := runtime.Callers(3, pcs[:]) + var st stack = pcs[0:n] + return &st +} + +// funcname removes the path prefix component of a function's name reported by func.Name(). +func funcname(name string) string { + i := strings.LastIndex(name, "/") + name = name[i+1:] + i = strings.Index(name, ".") + return name[i+1:] +} diff --git a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go index 50665dcb1e..203bd38adf 100644 --- a/vendor/go.opencensus.io/exporter/prometheus/prometheus.go +++ b/vendor/go.opencensus.io/exporter/prometheus/prometheus.go @@ -21,7 +21,6 @@ import ( "fmt" "log" "net/http" - "sort" "sync" "go.opencensus.io/internal" @@ -44,9 +43,10 @@ type Exporter struct { // Options contains options for configuring the exporter. type Options struct { - Namespace string - Registry *prometheus.Registry - OnError func(err error) + Namespace string + Registry *prometheus.Registry + OnError func(err error) + ConstLabels prometheus.Labels // ConstLabels will be set as labels on all views. } // NewExporter returns an exporter that exports stats to Prometheus. @@ -80,7 +80,7 @@ func (c *collector) registerViews(views ...*view.View) { viewName(c.opts.Namespace, view), view.Description, tagKeysToLabels(view.TagKeys), - nil, + c.opts.ConstLabels, ) c.registeredViewsMu.Lock() c.registeredViews[sig] = desc @@ -207,40 +207,24 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) { switch data := row.Data.(type) { case *view.CountData: - return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags)...) + return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags, v.TagKeys)...) case *view.DistributionData: points := make(map[float64]uint64) // Histograms are cumulative in Prometheus. - // 1. Sort buckets in ascending order but, retain - // their indices for reverse lookup later on. - // TODO: If there is a guarantee that distribution elements - // are always sorted, then skip the sorting. 
- indicesMap := make(map[float64]int) - buckets := make([]float64, 0, len(v.Aggregation.Buckets)) - for i, b := range v.Aggregation.Buckets { - if _, ok := indicesMap[b]; !ok { - indicesMap[b] = i - buckets = append(buckets, b) - } - } - sort.Float64s(buckets) - - // 2. Now that the buckets are sorted by magnitude - // we can create cumulative indicesmap them back by reverse index + // Get cumulative bucket counts. cumCount := uint64(0) - for _, b := range buckets { - i := indicesMap[b] + for i, b := range v.Aggregation.Buckets { cumCount += uint64(data.CountPerBucket[i]) points[b] = cumCount } - return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...) + return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags, v.TagKeys)...) case *view.SumData: - return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...) + return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags, v.TagKeys)...) case *view.LastValueData: - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags)...) + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags, v.TagKeys)...) default: return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation) @@ -254,14 +238,6 @@ func tagKeysToLabels(keys []tag.Key) (labels []string) { return labels } -func tagsToLabels(tags []tag.Tag) []string { - var names []string - for _, tag := range tags { - names = append(names, internal.Sanitize(tag.Key.Name())) - } - return names -} - func newCollector(opts Options, registrar *prometheus.Registry) *collector { return &collector{ reg: registrar, @@ -271,10 +247,21 @@ func newCollector(opts Options, registrar *prometheus.Registry) *collector { } } -func tagValues(t []tag.Tag) []string { +func tagValues(t []tag.Tag, expectedKeys []tag.Key) []string { var values []string + // Add empty string for all missing keys in the tags map. + idx := 0 for _, t := range t { + for t.Key != expectedKeys[idx] { + idx++ + values = append(values, "") + } values = append(values, t.Value) + idx++ + } + for idx < len(expectedKeys) { + idx++ + values = append(values, "") } return values } diff --git a/vendor/go.opencensus.io/exporter/zipkin/zipkin.go b/vendor/go.opencensus.io/exporter/zipkin/zipkin.go index 30d2fa4380..69de705711 100644 --- a/vendor/go.opencensus.io/exporter/zipkin/zipkin.go +++ b/vendor/go.opencensus.io/exporter/zipkin/zipkin.go @@ -149,6 +149,8 @@ func zipkinSpan(s *trace.SpanData, localEndpoint *model.Endpoint) model.SpanMode } case int64: m[key] = strconv.FormatInt(v, 10) + case float64: + m[key] = strconv.FormatFloat(v, 'f', -1, 64) } } z.Tags = m diff --git a/vendor/go.opencensus.io/internal/internal.go b/vendor/go.opencensus.io/internal/internal.go index e1d1238d01..9a638781cf 100644 --- a/vendor/go.opencensus.io/internal/internal.go +++ b/vendor/go.opencensus.io/internal/internal.go @@ -18,12 +18,12 @@ import ( "fmt" "time" - "go.opencensus.io" + opencensus "go.opencensus.io" ) // UserAgent is the user agent to be added to the outgoing // requests from the exporters. -var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version()) +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) // MonotonicEndTime returns the end time at present // but offset from start, monotonically. 
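// Illustrative sketch of the ConstLabels option added to the Prometheus exporter
// above (the namespace and label values are arbitrary examples):
//
//	import (
//		"github.com/prometheus/client_golang/prometheus"
//		ocprom "go.opencensus.io/exporter/prometheus"
//		"go.opencensus.io/stats/view"
//	)
//
//	exporter, err := ocprom.NewExporter(ocprom.Options{
//		Namespace:   "myapp",
//		ConstLabels: prometheus.Labels{"cluster": "dev"},
//	})
//	if err == nil {
//		view.RegisterExporter(exporter)
//	}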
diff --git a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go index 3b1af8b4b8..41b2c3fc03 100644 --- a/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go +++ b/vendor/go.opencensus.io/internal/tagencoding/tagencoding.go @@ -17,6 +17,7 @@ // used interally by the stats collector. package tagencoding // import "go.opencensus.io/internal/tagencoding" +// Values represent the encoded buffer for the values. type Values struct { Buffer []byte WriteIndex int @@ -31,6 +32,7 @@ func (vb *Values) growIfRequired(expected int) { } } +// WriteValue is the helper method to encode Values from map[Key][]byte. func (vb *Values) WriteValue(v []byte) { length := len(v) & 0xff vb.growIfRequired(1 + length) @@ -49,7 +51,7 @@ func (vb *Values) WriteValue(v []byte) { vb.WriteIndex += length } -// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte. +// ReadValue is the helper method to decode Values to a map[Key][]byte. func (vb *Values) ReadValue() []byte { // read length of v length := int(vb.Buffer[vb.ReadIndex]) @@ -67,6 +69,7 @@ func (vb *Values) ReadValue() []byte { return v } +// Bytes returns a reference to already written bytes in the Buffer. func (vb *Values) Bytes() []byte { return vb.Buffer[:vb.WriteIndex] } diff --git a/vendor/go.opencensus.io/internal/traceinternals.go b/vendor/go.opencensus.io/internal/traceinternals.go index 553ca68dc4..073af7b473 100644 --- a/vendor/go.opencensus.io/internal/traceinternals.go +++ b/vendor/go.opencensus.io/internal/traceinternals.go @@ -22,6 +22,7 @@ import ( // TODO(#412): remove this var Trace interface{} +// LocalSpanStoreEnabled true if the local span store is enabled. var LocalSpanStoreEnabled bool // BucketConfiguration stores the number of samples to store for span buckets diff --git a/vendor/go.opencensus.io/stats/internal/validation.go b/vendor/go.opencensus.io/metric/metricdata/doc.go similarity index 72% rename from vendor/go.opencensus.io/stats/internal/validation.go rename to vendor/go.opencensus.io/metric/metricdata/doc.go index b946667f96..52a7b3bf85 100644 --- a/vendor/go.opencensus.io/stats/internal/validation.go +++ b/vendor/go.opencensus.io/metric/metricdata/doc.go @@ -12,17 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -package internal // import "go.opencensus.io/stats/internal" - -const ( - MaxNameLength = 255 -) - -func IsPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} +// Package metricdata contains the metrics data model. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. +package metricdata // import "go.opencensus.io/metric/metricdata" diff --git a/vendor/go.opencensus.io/metric/metricdata/exemplar.go b/vendor/go.opencensus.io/metric/metricdata/exemplar.go new file mode 100644 index 0000000000..cdbeef0586 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/exemplar.go @@ -0,0 +1,33 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Their purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +type Attachments map[string]interface{} diff --git a/vendor/go.opencensus.io/metric/metricdata/label.go b/vendor/go.opencensus.io/metric/metricdata/label.go new file mode 100644 index 0000000000..87c55b9c86 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/label.go @@ -0,0 +1,28 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// LabelValue represents the value of a label. +// The zero value represents a missing label value, which may be treated +// differently to an empty string value by some back ends. +type LabelValue struct { + Value string // string value of the label + Present bool // flag that indicated whether a value is present or not +} + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. +func NewLabelValue(val string) LabelValue { + return LabelValue{Value: val, Present: true} +} diff --git a/vendor/go.opencensus.io/metric/metricdata/metric.go b/vendor/go.opencensus.io/metric/metricdata/metric.go new file mode 100644 index 0000000000..6ccdec5837 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/metric.go @@ -0,0 +1,46 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. 
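+// For example, a hand-built gauge metric might look like the following
+// (an illustrative sketch; the metric name and label values are arbitrary):
+//
+//	m := &Metric{
+//		Descriptor: Descriptor{
+//			Name:      "queue_length",
+//			Unit:      UnitDimensionless,
+//			Type:      TypeGaugeInt64,
+//			LabelKeys: []string{"queue"},
+//		},
+//		TimeSeries: []*TimeSeries{{
+//			LabelValues: []LabelValue{NewLabelValue("default")},
+//			Points:      []Point{NewInt64Point(time.Now(), 7)},
+//		}},
+//	}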
+type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []string // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values. +type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/vendor/go.opencensus.io/metric/metricdata/point.go b/vendor/go.opencensus.io/metric/metricdata/point.go new file mode 100644 index 0000000000..7fe057b19c --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/point.go @@ -0,0 +1,193 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. +func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. 
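+// For example (an illustrative sketch; printer is a hypothetical visitor type):
+//
+//	type printer struct{}
+//
+//	func (printer) VisitInt64Value(v int64)              { fmt.Println("int64:", v) }
+//	func (printer) VisitFloat64Value(v float64)          { fmt.Println("float64:", v) }
+//	func (printer) VisitDistributionValue(*Distribution) { fmt.Println("distribution") }
+//	func (printer) VisitSummaryValue(*Summary)           { fmt.Println("summary") }
+//
+//	p := NewInt64Point(time.Now(), 42)
+//	p.ReadValue(printer{})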
+func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Bucket If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. 
+ Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types. +const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/vendor/go.opencensus.io/metric/metricdata/type_string.go b/vendor/go.opencensus.io/metric/metricdata/type_string.go new file mode 100644 index 0000000000..c3f8ec27b5 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. + +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/vendor/go.opencensus.io/metric/metricdata/unit.go b/vendor/go.opencensus.io/metric/metricdata/unit.go new file mode 100644 index 0000000000..b483a1371b --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricdata/unit.go @@ -0,0 +1,27 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. +const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/vendor/go.opencensus.io/metric/metricproducer/manager.go b/vendor/go.opencensus.io/metric/metricproducer/manager.go new file mode 100644 index 0000000000..ca1f390493 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/manager.go @@ -0,0 +1,78 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
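+
+// An illustrative sketch of how this package is typically used from a caller's
+// point of view (myProducer is a hypothetical type, not defined in this repository):
+// a metric source implements Producer and registers itself with the global manager
+// so that readers can discover it.
+//
+//	type myProducer struct{}
+//
+//	func (myProducer) Read() []*metricdata.Metric { return nil }
+//
+//	metricproducer.GlobalManager().AddProducer(myProducer{})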
+ +package metricproducer + +import ( + "sync" +) + +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. +// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. +type Manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *Manager +var once sync.Once + +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { + once.Do(func() { + prodMgr = &Manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// AddProducer adds the producer to the Manager if it is not already present. +func (pm *Manager) AddProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} +} + +// DeleteProducer deletes the producer from the Manager if it is present. +func (pm *Manager) DeleteProducer(producer Producer) { + if producer == nil { + return + } + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) +} + +// GetAll returns a slice of all producer currently registered with +// the Manager. For each call it generates a new slice. The slice +// should not be cached as registration may change at any time. It is +// typically called periodically by exporter to read metrics from +// the producers. +func (pm *Manager) GetAll() []Producer { + pm.mu.Lock() + defer pm.mu.Unlock() + producers := make([]Producer, len(pm.producers)) + i := 0 + for producer := range pm.producers { + producers[i] = producer + i++ + } + return producers +} diff --git a/vendor/go.opencensus.io/metric/metricproducer/producer.go b/vendor/go.opencensus.io/metric/metricproducer/producer.go new file mode 100644 index 0000000000..6cee9ed178 --- /dev/null +++ b/vendor/go.opencensus.io/metric/metricproducer/producer.go @@ -0,0 +1,28 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricproducer + +import ( + "go.opencensus.io/metric/metricdata" +) + +// Producer is a source of metrics. +type Producer interface { + // Read should return the current values of all metrics supported by this + // metric provider. + // The returned metrics should be unique for each combination of name and + // resource. + Read() []*metricdata.Metric +} diff --git a/vendor/go.opencensus.io/opencensus.go b/vendor/go.opencensus.io/opencensus.go index eb8e7213d4..d2565f1e2b 100644 --- a/vendor/go.opencensus.io/opencensus.go +++ b/vendor/go.opencensus.io/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.14.0" + return "0.21.0" } diff --git a/vendor/go.opencensus.io/plugin/ochttp/client.go b/vendor/go.opencensus.io/plugin/ochttp/client.go index 1807921064..da815b2a73 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client.go @@ -16,6 +16,7 @@ package ochttp import ( "net/http" + "net/http/httptrace" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" @@ -46,17 +47,29 @@ type Transport struct { // for spans started by this transport. StartOptions trace.StartOptions + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + // NameFromRequest holds the function to use for generating the span name // from the information found in the outgoing HTTP Request. By default the // name equals the URL Path. FormatSpanName func(*http.Request) string + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + // TODO: Implement tag propagation for HTTP. } // RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } // TODO: remove excessive nesting of http.RoundTrippers here. format := t.Propagation if format == nil { @@ -66,14 +79,21 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { if spanNameFormatter == nil { spanNameFormatter = spanNameFromURL } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + rt = &traceTransport{ base: rt, format: format, startOptions: trace.StartOptions{ - Sampler: t.StartOptions.Sampler, + Sampler: startOpts.Sampler, SpanKind: trace.SpanKindClient, }, formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, } rt = statsTransport{base: rt} return rt.RoundTrip(req) diff --git a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go index 9b286b929b..17142aabe0 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/client_stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/client_stats.go @@ -34,8 +34,11 @@ type statsTransport struct { // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. 
func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { ctx, _ := tag.New(req.Context(), - tag.Upsert(Host, req.URL.Host), + tag.Upsert(KeyClientHost, req.Host), + tag.Upsert(Host, req.Host), + tag.Upsert(KeyClientPath, req.URL.Path), tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), tag.Upsert(Method, req.Method)) req = req.WithContext(ctx) track := &tracker{ @@ -58,11 +61,14 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { track.end() } else { track.statusCode = resp.StatusCode + if req.Method != "HEAD" { + track.respContentLength = resp.ContentLength + } if resp.Body == nil { track.end() } else { track.body = resp.Body - resp.Body = track + resp.Body = wrappedBody(track, resp.Body) } } return resp, err @@ -79,36 +85,48 @@ func (t statsTransport) CancelRequest(req *http.Request) { } type tracker struct { - ctx context.Context - respSize int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once + ctx context.Context + respSize int64 + respContentLength int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once } var _ io.ReadCloser = (*tracker)(nil) func (t *tracker) end() { t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + respSize := t.respSize + if t.respSize == 0 && t.respContentLength > 0 { + respSize = t.respContentLength + } m := []stats.Measurement{ - ClientLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), ClientResponseBytes.M(t.respSize), } if t.reqSize >= 0 { m = append(m, ClientRequestBytes.M(t.reqSize)) } - ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) - stats.Record(ctx, m...) + + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), + }, m...) }) } func (t *tracker) Read(b []byte) (int, error) { n, err := t.body.Read(b) + t.respSize += int64(n) switch err { case nil: - t.respSize += int64(n) return n, nil case io.EOF: t.end() diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go index f777772ec9..2f1c7f0063 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/b3/b3.go @@ -38,7 +38,7 @@ const ( // because there are additional fields not represented in the // OpenCensus span context. Spans created from the incoming // header will be the direct children of the client-side span. -// Similarly, reciever of the outgoing spans should use client-side +// Similarly, receiver of the outgoing spans should use client-side // span created by OpenCensus as the parent. type HTTPFormat struct{} diff --git a/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go new file mode 100644 index 0000000000..65ab1e9966 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/propagation/tracecontext/propagation.go @@ -0,0 +1,187 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracecontext contains HTTP propagator for TraceContext standard. +// See https://github.com/w3c/distributed-tracing for more information. +package tracecontext // import "go.opencensus.io/plugin/ochttp/propagation/tracecontext" + +import ( + "encoding/hex" + "fmt" + "net/http" + "net/textproto" + "regexp" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" + "go.opencensus.io/trace/tracestate" +) + +const ( + supportedVersion = 0 + maxVersion = 254 + maxTracestateLen = 512 + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" + trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$` +) + +var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements the TraceContext trace propagation format. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h, ok := getRequestHeader(req, traceparentHeader, false) + if !ok { + return trace.SpanContext{}, false + } + sections := strings.Split(h, "-") + if len(sections) < 4 { + return trace.SpanContext{}, false + } + + if len(sections[0]) != 2 { + return trace.SpanContext{}, false + } + ver, err := hex.DecodeString(sections[0]) + if err != nil { + return trace.SpanContext{}, false + } + version := int(ver[0]) + if version > maxVersion { + return trace.SpanContext{}, false + } + + if version == 0 && len(sections) != 4 { + return trace.SpanContext{}, false + } + + if len(sections[1]) != 32 { + return trace.SpanContext{}, false + } + tid, err := hex.DecodeString(sections[1]) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], tid) + + if len(sections[2]) != 16 { + return trace.SpanContext{}, false + } + sid, err := hex.DecodeString(sections[2]) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.SpanID[:], sid) + + opts, err := hex.DecodeString(sections[3]) + if err != nil || len(opts) < 1 { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(opts[0]) + + // Don't allow all zero trace or span ID. + if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} { + return trace.SpanContext{}, false + } + + sc.Tracestate = tracestateFromRequest(req) + return sc, true +} + +// getRequestHeader returns a combined header field according to RFC7230 section 3.2.2. +// If commaSeparated is true, multiple header fields with the same field name using be +// combined using ",". +// If no header was found using the given name, "ok" would be false. +// If more than one headers was found using the given name, while commaSeparated is false, +// "ok" would be false. 
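+// As an illustrative sketch, HTTPFormat is typically installed on an ochttp
+// client transport so that outgoing requests carry traceparent/tracestate headers:
+//
+//	client := &http.Client{
+//		Transport: &ochttp.Transport{
+//			Propagation: &tracecontext.HTTPFormat{},
+//		},
+//	}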
+func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) { + v := req.Header[textproto.CanonicalMIMEHeaderKey(name)] + switch len(v) { + case 0: + return "", false + case 1: + return v[0], true + default: + return strings.Join(v, ","), commaSeparated + } +} + +// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error. +// Revisit to return additional boolean value to indicate parsing error when following issues +// are resolved. +// https://github.com/w3c/distributed-tracing/issues/172 +// https://github.com/w3c/distributed-tracing/issues/175 +func tracestateFromRequest(req *http.Request) *tracestate.Tracestate { + h, _ := getRequestHeader(req, tracestateHeader, true) + if h == "" { + return nil + } + + var entries []tracestate.Entry + pairs := strings.Split(h, ",") + hdrLenWithoutOWS := len(pairs) - 1 // Number of commas + for _, pair := range pairs { + matches := trimOWSRegExp.FindStringSubmatch(pair) + if matches == nil { + return nil + } + pair = matches[1] + hdrLenWithoutOWS += len(pair) + if hdrLenWithoutOWS > maxTracestateLen { + return nil + } + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return nil + } + entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]}) + } + ts, err := tracestate.New(nil, entries...) + if err != nil { + return nil + } + + return ts +} + +func tracestateToRequest(sc trace.SpanContext, req *http.Request) { + var pairs = make([]string, 0, len(sc.Tracestate.Entries())) + if sc.Tracestate != nil { + for _, entry := range sc.Tracestate.Entries() { + pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "=")) + } + h := strings.Join(pairs, ",") + + if h != "" && len(h) <= maxTracestateLen { + req.Header.Set(tracestateHeader, h) + } + } +} + +// SpanContextToRequest modifies the given request to include traceparent and tracestate headers. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + h := fmt.Sprintf("%x-%x-%x-%x", + []byte{supportedVersion}, + sc.TraceID[:], + sc.SpanID[:], + []byte{byte(sc.TraceOptions)}) + req.Header.Set(traceparentHeader, h) + tracestateToRequest(sc, req) +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/route.go b/vendor/go.opencensus.io/plugin/ochttp/route.go new file mode 100644 index 0000000000..5e6a343076 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/route.go @@ -0,0 +1,61 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "context" + "net/http" + + "go.opencensus.io/tag" +) + +// SetRoute sets the http_server_route tag to the given value. +// It's useful when an HTTP framework does not support the http.Handler interface +// and using WithRouteTag is not an option, but provides a way to hook into the request flow. 
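+// For example (an illustrative sketch; usersHandler is a hypothetical http.Handler):
+//
+//	mux := http.NewServeMux()
+//	mux.Handle("/users/", ochttp.WithRouteTag(usersHandler, "/users/:id"))
+//	_ = http.ListenAndServe(":8080", &ochttp.Handler{Handler: mux})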
+func SetRoute(ctx context.Context, route string) { + if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) + } +} + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. +func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. +type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) + } +} + +type addedTagsKey struct{} + +type addedTags struct { + t []tag.Mutator +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/server.go b/vendor/go.opencensus.io/plugin/ochttp/server.go index fe2a6eb587..5fe15e89ff 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/server.go +++ b/vendor/go.opencensus.io/plugin/ochttp/server.go @@ -15,10 +15,8 @@ package ochttp import ( - "bufio" "context" - "errors" - "net" + "io" "net/http" "strconv" "sync" @@ -58,6 +56,10 @@ type Handler struct { // for spans started by this transport. StartOptions trace.StartOptions + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) // servers. 
If true, any trace metadata set on the incoming request will // be added as a linked trace instead of being added as a parent of the @@ -71,19 +73,23 @@ type Handler struct { } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var traceEnd, statsEnd func() - r, traceEnd = h.startTrace(w, r) + var tags addedTags + r, traceEnd := h.startTrace(w, r) defer traceEnd() - w, statsEnd = h.startStats(w, r) - defer statsEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) handler := h.Handler if handler == nil { handler = http.DefaultServeMux } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) handler.ServeHTTP(w, r) } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + if isHealthEndpoint(r.URL.Path) { + return r, func() {} + } var name string if h.FormatSpanName == nil { name = spanNameFromURL(r) @@ -91,22 +97,28 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ name = h.FormatSpanName(r) } ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + var span *trace.Span sc, ok := h.extractSpanContext(r) if ok && !h.IsPublicEndpoint { ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(h.StartOptions.Sampler), + trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer)) } else { ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(h.StartOptions.Sampler), + trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer), ) if ok { span.AddLink(trace.Link{ TraceID: sc.TraceID, SpanID: sc.SpanID, - Type: trace.LinkTypeChild, + Type: trace.LinkTypeParent, Attributes: nil, }) } @@ -122,9 +134,9 @@ func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) return h.Propagation.SpanContextFromRequest(r) } -func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func()) { +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.URL.Host), + tag.Upsert(Host, r.Host), tag.Upsert(Path, r.URL.Path), tag.Upsert(Method, r.Method)) track := &trackingResponseWriter{ @@ -139,7 +151,7 @@ func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.Respo track.reqSize = r.ContentLength } stats.Record(ctx, ServerRequestCount.M(1)) - return track, track.end + return track.wrappedResponseWriter(), track.end } type trackingResponseWriter struct { @@ -153,40 +165,12 @@ type trackingResponseWriter struct { writer http.ResponseWriter } -// Compile time assertions for widely used net/http interfaces -var _ http.CloseNotifier = (*trackingResponseWriter)(nil) -var _ http.Flusher = (*trackingResponseWriter)(nil) -var _ http.Hijacker = (*trackingResponseWriter)(nil) -var _ http.Pusher = (*trackingResponseWriter)(nil) +// Compile time assertion for ResponseWriter interface var _ http.ResponseWriter = (*trackingResponseWriter)(nil) -var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker") - -func (t *trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hj, ok := t.writer.(http.Hijacker) - if !ok { - return nil, nil, errHijackerUnimplemented - } - return hj.Hijack() -} +var logTagsErrorOnce sync.Once -func (t *trackingResponseWriter) CloseNotify() <-chan bool { - cn, ok := t.writer.(http.CloseNotifier) - if !ok { - return nil - } 
- return cn.CloseNotify() -} - -func (t *trackingResponseWriter) Push(target string, opts *http.PushOptions) error { - pusher, ok := t.writer.(http.Pusher) - if !ok { - return http.ErrNotSupported - } - return pusher.Push(target, opts) -} - -func (t *trackingResponseWriter) end() { +func (t *trackingResponseWriter) end(tags *addedTags) { t.endOnce.Do(func() { if t.statusCode == 0 { t.statusCode = 200 @@ -194,6 +178,7 @@ func (t *trackingResponseWriter) end() { span := trace.FromContext(t.ctx) span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) m := []stats.Measurement{ ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), @@ -202,8 +187,10 @@ func (t *trackingResponseWriter) end() { if t.reqSize >= 0 { m = append(m, ServerRequestBytes.M(t.reqSize)) } - ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) - stats.Record(ctx, m...) + allTags := make([]tag.Mutator, len(tags.t)+1) + allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) + copy(allTags[1:], tags.t) + stats.RecordWithTags(t.ctx, allTags, m...) }) } @@ -223,8 +210,231 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { t.statusLine = http.StatusText(t.statusCode) } -func (t *trackingResponseWriter) Flush() { - if flusher, ok := t.writer.(http.Flusher); ok { - flusher.Flush() +// wrappedResponseWriter returns a wrapped version of the original +// ResponseWriter and only implements the same combination of additional +// interfaces as the original. +// This implementation is based on https://github.com/felixge/httpsnoop. +func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + 
return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case !i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} } } diff --git a/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go new file mode 100644 index 0000000000..05c6c56cc7 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/span_annotating_client_trace.go @@ -0,0 +1,169 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// TODO: Remove NewSpanAnnotator at the next release. + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. +func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := 
[]trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, "Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/vendor/go.opencensus.io/plugin/ochttp/stats.go b/vendor/go.opencensus.io/plugin/ochttp/stats.go index 19a882500e..63bbcda5e3 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/stats.go +++ b/vendor/go.opencensus.io/plugin/ochttp/stats.go @@ -20,20 +20,67 @@ import ( "go.opencensus.io/tag" ) +// Deprecated: client HTTP measures. +var ( + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) +) + // The following client HTTP measures are supported for use in custom views. 
var ( - ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) - ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) - ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) - ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) ) // The following server HTTP measures are supported for use in custom views: var ( - ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) - ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) - ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) - ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) + ServerRequestCount = stats.Int64( + "opencensus.io/http/server/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + ServerRequestBytes = stats.Int64( + "opencensus.io/http/server/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + ServerResponseBytes = stats.Int64( + "opencensus.io/http/server/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + ServerLatency = stats.Float64( + "opencensus.io/http/server/latency", + "End-to-end latency", + stats.UnitMilliseconds) ) // The following tags are applied to stats recorded by this package. Host, Path @@ -60,17 +107,70 @@ var ( // Method is the HTTP method of the request, capitalized (GET, POST, etc.). Method, _ = tag.NewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on the a + // ServeMux (or similar string). + KeyServerRoute, _ = tag.NewKey("http_server_route") +) + +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). + KeyClientMethod, _ = tag.NewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath, _ = tag.NewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. + KeyClientStatus, _ = tag.NewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost, _ = tag.NewKey("http_client_host") ) // Default distributions used by views in this package. 
var ( - DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) +) + +// Package ochttp provides some convenience views for client measures. +// You still need to register these views for data to actually be collected. +var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } ) -// Package ochttp provides some convenience views. -// You need to register the views for data to actually be collected. +// Deprecated: Old client Views. var ( + // Deprecated: No direct replacement, but see ClientCompletedCount. ClientRequestCountView = &view.View{ Name: "opencensus.io/http/client/request_count", Description: "Count of HTTP requests started", @@ -78,43 +178,52 @@ var ( Aggregation: view.Count(), } + // Deprecated: Use ClientSentBytesDistribution. ClientRequestBytesView = &view.View{ Name: "opencensus.io/http/client/request_bytes", Description: "Size distribution of HTTP request body", - Measure: ClientRequestBytes, + Measure: ClientSentBytes, Aggregation: DefaultSizeDistribution, } + // Deprecated: Use ClientReceivedBytesDistribution instead. ClientResponseBytesView = &view.View{ Name: "opencensus.io/http/client/response_bytes", Description: "Size distribution of HTTP response body", - Measure: ClientResponseBytes, + Measure: ClientReceivedBytes, Aggregation: DefaultSizeDistribution, } + // Deprecated: Use ClientRoundtripLatencyDistribution instead. 
ClientLatencyView = &view.View{ Name: "opencensus.io/http/client/latency", Description: "Latency distribution of HTTP requests", - Measure: ClientLatency, + Measure: ClientRoundtripLatency, Aggregation: DefaultLatencyDistribution, } + // Deprecated: Use ClientCompletedCount instead. ClientRequestCountByMethod = &view.View{ Name: "opencensus.io/http/client/request_count_by_method", Description: "Client request count by HTTP method", TagKeys: []tag.Key{Method}, - Measure: ClientRequestCount, + Measure: ClientSentBytes, Aggregation: view.Count(), } + // Deprecated: Use ClientCompletedCount instead. ClientResponseCountByStatusCode = &view.View{ Name: "opencensus.io/http/client/response_count_by_status_code", Description: "Client response count by status code", TagKeys: []tag.Key{StatusCode}, - Measure: ClientLatency, + Measure: ClientRoundtripLatency, Aggregation: view.Count(), } +) +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. +var ( ServerRequestCountView = &view.View{ Name: "opencensus.io/http/server/request_count", Description: "Count of HTTP requests started", @@ -161,6 +270,7 @@ var ( ) // DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. var DefaultClientViews = []*view.View{ ClientRequestCountView, ClientRequestBytesView, @@ -171,6 +281,7 @@ var DefaultClientViews = []*view.View{ } // DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. var DefaultServerViews = []*view.View{ ServerRequestCountView, ServerRequestBytesView, diff --git a/vendor/go.opencensus.io/plugin/ochttp/trace.go b/vendor/go.opencensus.io/plugin/ochttp/trace.go index ea066a2c60..c23b97fb1f 100644 --- a/vendor/go.opencensus.io/plugin/ochttp/trace.go +++ b/vendor/go.opencensus.io/plugin/ochttp/trace.go @@ -17,6 +17,7 @@ package ochttp import ( "io" "net/http" + "net/http/httptrace" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/trace" @@ -33,6 +34,7 @@ const ( HostAttribute = "http.host" MethodAttribute = "http.method" PathAttribute = "http.path" + URLAttribute = "http.url" UserAgentAttribute = "http.user_agent" StatusCodeAttribute = "http.status_code" ) @@ -42,6 +44,7 @@ type traceTransport struct { startOptions trace.StartOptions format propagation.HTTPFormat formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace } // TODO(jbd): Add message events for request and response size. @@ -53,12 +56,27 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { name := t.formatSpanName(req) // TODO(jbd): Discuss whether we want to prefix // outgoing requests with Sent. - _, span := trace.StartSpan(req.Context(), name, + ctx, span := trace.StartSpan(req.Context(), name, trace.WithSampler(t.startOptions.Sampler), trace.WithSpanKind(trace.SpanKindClient)) - req = req.WithContext(trace.WithSpan(req.Context(), span)) + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. 
+ // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header t.format.SpanContextToRequest(span.SpanContext(), req) } @@ -76,7 +94,8 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { // span.End() will be invoked after // a read from resp.Body returns io.EOF or when // resp.Body.Close() is invoked. - resp.Body = &bodyTracker{rc: resp.Body, span: span} + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) return resp, err } @@ -132,12 +151,21 @@ func spanNameFromURL(req *http.Request) string { } func requestAttrs(r *http.Request) []trace.Attribute { - return []trace.Attribute{ + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(HostAttribute, r.URL.Host), + trace.StringAttribute(URLAttribute, r.URL.String()), + trace.StringAttribute(HostAttribute, r.Host), trace.StringAttribute(MethodAttribute, r.Method), - trace.StringAttribute(UserAgentAttribute, r.UserAgent()), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) } + + return attrs } func responseAttrs(resp *http.Response) []trace.Attribute { @@ -179,21 +207,33 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { } var codeToStr = map[int32]string{ - trace.StatusCodeOK: `"OK"`, - trace.StatusCodeCancelled: `"CANCELLED"`, - trace.StatusCodeUnknown: `"UNKNOWN"`, - trace.StatusCodeInvalidArgument: `"INVALID_ARGUMENT"`, - trace.StatusCodeDeadlineExceeded: `"DEADLINE_EXCEEDED"`, - trace.StatusCodeNotFound: `"NOT_FOUND"`, - trace.StatusCodeAlreadyExists: `"ALREADY_EXISTS"`, - trace.StatusCodePermissionDenied: `"PERMISSION_DENIED"`, - trace.StatusCodeResourceExhausted: `"RESOURCE_EXHAUSTED"`, - trace.StatusCodeFailedPrecondition: `"FAILED_PRECONDITION"`, - trace.StatusCodeAborted: `"ABORTED"`, - trace.StatusCodeOutOfRange: `"OUT_OF_RANGE"`, - trace.StatusCodeUnimplemented: `"UNIMPLEMENTED"`, - trace.StatusCodeInternal: `"INTERNAL"`, - trace.StatusCodeUnavailable: `"UNAVAILABLE"`, - trace.StatusCodeDataLoss: `"DATA_LOSS"`, - trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`, + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, +} + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. 
+ if path == "/healthz" || path == "/_ah/health" { + return true + } + return false } diff --git a/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go new file mode 100644 index 0000000000..7d75cae2b1 --- /dev/null +++ b/vendor/go.opencensus.io/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. +func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} diff --git a/vendor/go.opencensus.io/resource/resource.go b/vendor/go.opencensus.io/resource/resource.go new file mode 100644 index 0000000000..b1764e1d3b --- /dev/null +++ b/vendor/go.opencensus.io/resource/resource.go @@ -0,0 +1,164 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. +package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +// Environment variables used by FromEnv to decode a resource. +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. +type Resource struct { + Type string + Labels map[string]string +} + +// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. 
+func EncodeLabels(labels map[string]string) string {
+	sortedKeys := make([]string, 0, len(labels))
+	for k := range labels {
+		sortedKeys = append(sortedKeys, k)
+	}
+	sort.Strings(sortedKeys)
+
+	s := ""
+	for i, k := range sortedKeys {
+		if i > 0 {
+			s += ","
+		}
+		s += k + "=" + strconv.Quote(labels[k])
+	}
+	return s
+}
+
+var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`)
+
+// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable.
+// A list of labels of the form `<key1>="<value1>",<key2>="<value2>",...` is accepted.
+// Domain names and paths are accepted as label keys.
+// Most users will want to use FromEnv instead.
+func DecodeLabels(s string) (map[string]string, error) {
+	m := map[string]string{}
+	// Ensure a trailing comma, which allows us to keep the regex simpler
+	s = strings.TrimRight(strings.TrimSpace(s), ",") + ","
+
+	for len(s) > 0 {
+		match := labelRegex.FindStringSubmatch(s)
+		if len(match) == 0 {
+			return nil, fmt.Errorf("invalid label formatting, remainder: %s", s)
+		}
+		v := match[2]
+		if v == "" {
+			v = match[3]
+		} else {
+			var err error
+			if v, err = strconv.Unquote(v); err != nil {
+				return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err)
+			}
+		}
+		m[match[1]] = v
+
+		s = s[len(match[0]):]
+	}
+	return m, nil
+}
+
+// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE
+// and OC_RESOURCE_LABELS environment variables.
+func FromEnv(context.Context) (*Resource, error) {
+	res := &Resource{
+		Type: strings.TrimSpace(os.Getenv(EnvVarType)),
+	}
+	labels := strings.TrimSpace(os.Getenv(EnvVarLabels))
+	if labels == "" {
+		return res, nil
+	}
+	var err error
+	if res.Labels, err = DecodeLabels(labels); err != nil {
+		return nil, err
+	}
+	return res, nil
+}
+
+var _ Detector = FromEnv
+
+// merge resource information from b into a. In case of a collision, a takes precedence.
+func merge(a, b *Resource) *Resource {
+	if a == nil {
+		return b
+	}
+	if b == nil {
+		return a
+	}
+	res := &Resource{
+		Type:   a.Type,
+		Labels: map[string]string{},
+	}
+	if res.Type == "" {
+		res.Type = b.Type
+	}
+	for k, v := range b.Labels {
+		res.Labels[k] = v
+	}
+	// Labels from resource a overwrite labels from resource b.
+	for k, v := range a.Labels {
+		res.Labels[k] = v
+	}
+	return res
+}
+
+// Detector attempts to detect resource information.
+// If the detector cannot find resource information, the returned resource is nil but no
+// error is returned.
+// An error is only returned on unexpected failures.
+type Detector func(context.Context) (*Resource, error)
+
+// MultiDetector returns a Detector that calls all input detectors in order and
+// merges each result with the previous one. In case a type or label key is already set,
+// the first set value takes precedence.
+// It returns on the first error that a sub-detector encounters.
+func MultiDetector(detectors ...Detector) Detector {
+	return func(ctx context.Context) (*Resource, error) {
+		return detectAll(ctx, detectors...)
+	}
+}
+
+// detectAll calls all input detectors sequentially and merges each result with the previous one.
+// It returns on the first error that a sub-detector encounters.
+func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { + var res *Resource + for _, d := range detectors { + r, err := d(ctx) + if err != nil { + return nil, err + } + res = merge(res, r) + } + return res, nil +} diff --git a/vendor/go.opencensus.io/stats/doc.go b/vendor/go.opencensus.io/stats/doc.go index 7a8a62c143..00d473ee02 100644 --- a/vendor/go.opencensus.io/stats/doc.go +++ b/vendor/go.opencensus.io/stats/doc.go @@ -21,35 +21,49 @@ aggregate the collected data, and export the aggregated data. Measures -A measure represents a type of metric to be tracked and recorded. +A measure represents a type of data point to be tracked and recorded. For example, latency, request Mb/s, and response Mb/s are measures to collect from a server. -Each measure needs to be registered before being used. Measure -constructors such as Int64 and Float64 automatically +Measure constructors such as Int64 and Float64 automatically register the measure by the given name. Each registered measure needs to be unique by name. Measures also have a description and a unit. -Libraries can define and export measures for their end users to -create views and collect instrumentation data. +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Users collect data points on the existing measures with +latency event. Measurements are created from measures with the current context. Tags from the current context are recorded with the measurements if they are any. -Recorded measurements are dropped immediately if user is not aggregating -them via views. Users don't necessarily need to conditionally enable/disable +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable recording to reduce cost. Recording of measurements is cheap. -Libraries can always record measurements, and end-users can later decide +Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. + +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. + */ package stats // import "go.opencensus.io/stats" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. 
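The updated stats/doc.go above describes the measure/view workflow (define measures, register views, record tagged measurements) only in prose. The following is a minimal, illustrative sketch of that workflow against the go.opencensus.io v0.20.x API pulled in by this change; it is not taken from the vendored sources, and the measure name, tag key, and bucket bounds are invented for the example.

package main

import (
	"context"
	"log"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
	"go.opencensus.io/tag"
)

// Example-only identifiers: the measure name, tag key, and bucket bounds
// below are placeholders and do not appear anywhere in this change.
var (
	mLatency     = stats.Float64("example.com/request_latency", "End-to-end request latency", stats.UnitMilliseconds)
	keyMethod, _ = tag.NewKey("method")
)

func main() {
	// A measurement is dropped unless a registered view subscribes to its
	// measure, so register the view before recording.
	latencyView := &view.View{
		Name:        "example.com/request_latency",
		Description: "Latency distribution of requests",
		Measure:     mLatency,
		TagKeys:     []tag.Key{keyMethod},
		Aggregation: view.Distribution(5, 10, 25, 50, 100, 250, 500, 1000),
	}
	if err := view.Register(latencyView); err != nil {
		log.Fatalf("failed to register view: %v", err)
	}

	// RecordWithTags (added in the stats/record.go change below) applies the
	// tag mutation only for this recording instead of mutating the caller's
	// context.
	if err := stats.RecordWithTags(context.Background(),
		[]tag.Mutator{tag.Upsert(keyMethod, "GET")},
		mLatency.M(12.3),
	); err != nil {
		log.Printf("failed to record: %v", err)
	}
}

Because a Measurement is dropped unless some registered view subscribes to its measure (see the subscribed() check in the stats/record.go hunk that follows), views must be registered before recordings are expected to reach exporters.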
diff --git a/vendor/go.opencensus.io/stats/internal/record.go b/vendor/go.opencensus.io/stats/internal/record.go index 6341eb2ad7..36935e629b 100644 --- a/vendor/go.opencensus.io/stats/internal/record.go +++ b/vendor/go.opencensus.io/stats/internal/record.go @@ -19,7 +19,7 @@ import ( ) // DefaultRecorder will be called for each Record call. -var DefaultRecorder func(*tag.Map, interface{}) +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/vendor/go.opencensus.io/stats/measure.go b/vendor/go.opencensus.io/stats/measure.go index 7b4b49c67a..1ffd3cefc7 100644 --- a/vendor/go.opencensus.io/stats/measure.go +++ b/vendor/go.opencensus.io/stats/measure.go @@ -93,8 +93,9 @@ func registerMeasureHandle(name, desc, unit string) *measureDescriptor { // provides methods to create measurements of their kind. For example, Int64Measure // provides M to convert an int64 into a measurement. type Measurement struct { - v float64 - m Measure + v float64 + m Measure + desc *measureDescriptor } // Value returns the value of the Measurement as a float64. diff --git a/vendor/go.opencensus.io/stats/measure_float64.go b/vendor/go.opencensus.io/stats/measure_float64.go index da4b5a83ba..f02c1eda84 100644 --- a/vendor/go.opencensus.io/stats/measure_float64.go +++ b/vendor/go.opencensus.io/stats/measure_float64.go @@ -17,31 +17,17 @@ package stats // Float64Measure is a measure for float64 values. type Float64Measure struct { - md *measureDescriptor -} - -// Name returns the name of the measure. -func (m *Float64Measure) Name() string { - return m.md.name -} - -// Description returns the description of the measure. -func (m *Float64Measure) Description() string { - return m.md.description -} - -// Unit returns the unit of the measure. -func (m *Float64Measure) Unit() string { - return m.md.unit + desc *measureDescriptor } // M creates a new float64 measurement. // Use Record to record measurements. func (m *Float64Measure) M(v float64) Measurement { - if !m.md.subscribed() { - return Measurement{} + return Measurement{ + m: m, + desc: m.desc, + v: v, } - return Measurement{m: m, v: v} } // Float64 creates a new measure for float64 values. @@ -52,3 +38,18 @@ func Float64(name, description, unit string) *Float64Measure { mi := registerMeasureHandle(name, description, unit) return &Float64Measure{mi} } + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/measure_int64.go b/vendor/go.opencensus.io/stats/measure_int64.go index 5fedaad05f..d101d79735 100644 --- a/vendor/go.opencensus.io/stats/measure_int64.go +++ b/vendor/go.opencensus.io/stats/measure_int64.go @@ -17,31 +17,17 @@ package stats // Int64Measure is a measure for int64 values. type Int64Measure struct { - md *measureDescriptor -} - -// Name returns the name of the measure. -func (m *Int64Measure) Name() string { - return m.md.name -} - -// Description returns the description of the measure. -func (m *Int64Measure) Description() string { - return m.md.description -} - -// Unit returns the unit of the measure. 
-func (m *Int64Measure) Unit() string { - return m.md.unit + desc *measureDescriptor } // M creates a new int64 measurement. // Use Record to record measurements. func (m *Int64Measure) M(v int64) Measurement { - if !m.md.subscribed() { - return Measurement{} + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), } - return Measurement{m: m, v: float64(v)} } // Int64 creates a new measure for int64 values. @@ -52,3 +38,18 @@ func Int64(name, description, unit string) *Int64Measure { mi := registerMeasureHandle(name, description, unit) return &Int64Measure{mi} } + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/vendor/go.opencensus.io/stats/record.go b/vendor/go.opencensus.io/stats/record.go index 98865ff697..d2af0a60da 100644 --- a/vendor/go.opencensus.io/stats/record.go +++ b/vendor/go.opencensus.io/stats/record.go @@ -30,15 +30,19 @@ func init() { } } -// Record records one or multiple measurements with the same tags at once. +// Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { + recorder := internal.DefaultRecorder + if recorder == nil { + return + } if len(ms) == 0 { return } - var record bool + record := false for _, m := range ms { - if (m != Measurement{}) { + if m.desc.subscribed() { record = true break } @@ -46,7 +50,20 @@ func Record(ctx context.Context, ms ...Measurement) { if !record { return } - if internal.DefaultRecorder != nil { - internal.DefaultRecorder(tag.FromContext(ctx), ms) + // TODO(songy23): fix attachments. + recorder(tag.FromContext(ctx), ms, map[string]interface{}{}) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + ctx, err := tag.New(ctx, mutators...) + if err != nil { + return err } + Record(ctx, ms...) + return nil } diff --git a/vendor/go.opencensus.io/stats/view/aggregation_data.go b/vendor/go.opencensus.io/stats/view/aggregation_data.go index 88c500bff9..d500e67f73 100644 --- a/vendor/go.opencensus.io/stats/view/aggregation_data.go +++ b/vendor/go.opencensus.io/stats/view/aggregation_data.go @@ -17,6 +17,9 @@ package view import ( "math" + "time" + + "go.opencensus.io/metric/metricdata" ) // AggregationData represents an aggregated value from a collection. @@ -24,9 +27,10 @@ import ( // Mosts users won't directly access aggregration data. 
type AggregationData interface { isAggregationData() bool - addSample(v float64) + addSample(v float64, attachments map[string]interface{}, t time.Time) clone() AggregationData equal(other AggregationData) bool + toPoint(t metricdata.Type, time time.Time) metricdata.Point } const epsilon = 1e-9 @@ -41,7 +45,7 @@ type CountData struct { func (a *CountData) isAggregationData() bool { return true } -func (a *CountData) addSample(v float64) { +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { a.Value = a.Value + 1 } @@ -58,6 +62,15 @@ func (a *CountData) equal(other AggregationData) bool { return a.Value == a2.Value } +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // @@ -68,8 +81,8 @@ type SumData struct { func (a *SumData) isAggregationData() bool { return true } -func (a *SumData) addSample(f float64) { - a.Value += f +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { + a.Value += v } func (a *SumData) clone() AggregationData { @@ -84,26 +97,45 @@ func (a *SumData) equal(other AggregationData) bool { return math.Pow(a.Value-a2.Value, 2) < epsilon } +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + // DistributionData is the aggregated data for the // Distribution aggregation. // // Most users won't directly access distribution data. +// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - bounds []float64 // histogram distribution of the values + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. 
+ ExemplarsPerBucket []*metricdata.Exemplar + bounds []float64 // histogram distribution of the values } func newDistributionData(bounds []float64) *DistributionData { + bucketCount := len(bounds) + 1 return &DistributionData{ - CountPerBucket: make([]int64, len(bounds)+1), - bounds: bounds, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, } } @@ -119,46 +151,62 @@ func (a *DistributionData) variance() float64 { func (a *DistributionData) isAggregationData() bool { return true } -func (a *DistributionData) addSample(f float64) { - if f < a.Min { - a.Min = f +// TODO(songy23): support exemplar attachments. +func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { + if v < a.Min { + a.Min = v } - if f > a.Max { - a.Max = f + if v > a.Max { + a.Max = v } a.Count++ - a.incrementBucketCount(f) + a.addToBucket(v, attachments, t) if a.Count == 1 { - a.Mean = f + a.Mean = v return } oldMean := a.Mean - a.Mean = a.Mean + (f-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) } -func (a *DistributionData) incrementBucketCount(f float64) { - if len(a.bounds) == 0 { - a.CountPerBucket[0]++ - return +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { + var count *int64 + var i int + var b float64 + for i, b = range a.bounds { + if v < b { + count = &a.CountPerBucket[i] + break + } + } + if count == nil { // Last bucket. + i = len(a.bounds) + count = &a.CountPerBucket[i] + } + *count++ + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar } +} - for i, b := range a.bounds { - if f < b { - a.CountPerBucket[i]++ - return - } +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil + } + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, } - a.CountPerBucket[len(a.bounds)]++ } func (a *DistributionData) clone() AggregationData { - counts := make([]int64, len(a.CountPerBucket)) - copy(counts, a.CountPerBucket) c := *a - c.CountPerBucket = counts + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) 
return &c } @@ -181,6 +229,33 @@ func (a *DistributionData) equal(other AggregationData) bool { return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. + panic("unsupported metricdata.Type") + } +} + // LastValueData returns the last value recorded for LastValue aggregation. type LastValueData struct { Value float64 @@ -190,7 +265,7 @@ func (l *LastValueData) isAggregationData() bool { return true } -func (l *LastValueData) addSample(v float64) { +func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { l.Value = v } @@ -205,3 +280,14 @@ func (l *LastValueData) equal(other AggregationData) bool { } return l.Value == a2.Value } + +func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeGaugeInt64: + return metricdata.NewInt64Point(t, int64(l.Value)) + case metricdata.TypeGaugeFloat64: + return metricdata.NewFloat64Point(t, l.Value) + default: + panic("unsupported metricdata.Type") + } +} diff --git a/vendor/go.opencensus.io/stats/view/collector.go b/vendor/go.opencensus.io/stats/view/collector.go index 250395db26..8a6a2c0fdc 100644 --- a/vendor/go.opencensus.io/stats/view/collector.go +++ b/vendor/go.opencensus.io/stats/view/collector.go @@ -17,6 +17,7 @@ package view import ( "sort" + "time" "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" @@ -31,13 +32,13 @@ type collector struct { a *Aggregation } -func (c *collector) addSample(s string, v float64) { +func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData() c.signatures[s] = aggregator } - aggregator.addSample(v) + aggregator.addSample(v, attachments, t) } // collectRows returns a snapshot of the collected Row values. diff --git a/vendor/go.opencensus.io/stats/view/doc.go b/vendor/go.opencensus.io/stats/view/doc.go index 856fb4e153..dced225c3d 100644 --- a/vendor/go.opencensus.io/stats/view/doc.go +++ b/vendor/go.opencensus.io/stats/view/doc.go @@ -13,33 +13,34 @@ // limitations under the License. // -/* -Package view contains support for collecting and exposing aggregates over stats. - -In order to collect measurements, views need to be defined and registered. -A view allows recorded measurements to be filtered and aggregated over a time window. - -All recorded measurements can be filtered by a list of tags. - -OpenCensus provides several aggregation methods: count, distribution and sum. -Count aggregation only counts the number of measurement points. Distribution -aggregation provides statistical summary of the aggregated data. Sum distribution -sums up the measurement points. 
Aggregations are cumulative. - -Users can dynamically create and delete views. - -Libraries can export their own views and claim the view names -by registering them themselves. - -Exporting - -Collected and aggregated data can be exported to a metric collection -backend by registering its exporter. - -Multiple exporters can be registered to upload the data to various -different backends. Users need to unregister the exporters once they -no longer are needed. -*/ +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. +// All aggregations are cumulative. +// +// Views can be registerd and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. package view // import "go.opencensus.io/stats/view" // TODO(acetechnologist): Add a link to the language independent OpenCensus diff --git a/vendor/go.opencensus.io/stats/view/export.go b/vendor/go.opencensus.io/stats/view/export.go index ffd0d1ac70..7cb59718f5 100644 --- a/vendor/go.opencensus.io/stats/view/export.go +++ b/vendor/go.opencensus.io/stats/view/export.go @@ -27,6 +27,9 @@ var ( // Exporter takes a significant amount of time to // process a Data, that work should be done on another goroutine. // +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// // The Data should not be modified. type Exporter interface { ExportView(viewData *Data) diff --git a/vendor/go.opencensus.io/stats/view/view.go b/vendor/go.opencensus.io/stats/view/view.go index 22323e2c54..37f88e1d9f 100644 --- a/vendor/go.opencensus.io/stats/view/view.go +++ b/vendor/go.opencensus.io/stats/view/view.go @@ -17,14 +17,15 @@ package view import ( "bytes" + "errors" "fmt" "reflect" "sort" "sync/atomic" "time" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -67,6 +68,11 @@ func (v *View) same(other *View) bool { v.Measure.Name() == other.Measure.Name() } +// ErrNegativeBucketBounds error returned if histogram contains negative bounds. +// +// Deprecated: this should not be public. 
+var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") + // canonicalize canonicalizes v by setting explicit // defaults for Name and Description and sorting the TagKeys func (v *View) canonicalize() error { @@ -88,20 +94,40 @@ func (v *View) canonicalize() error { sort.Slice(v.TagKeys, func(i, j int) bool { return v.TagKeys[i].Name() < v.TagKeys[j].Name() }) + sort.Float64s(v.Aggregation.Buckets) + for _, b := range v.Aggregation.Buckets { + if b < 0 { + return ErrNegativeBucketBounds + } + } + // drop 0 bucket silently. + v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) + return nil } +func dropZeroBounds(bounds ...float64) []float64 { + for i, bound := range bounds { + if bound > 0 { + return bounds[i:] + } + } + return []float64{} +} + // viewInternal is the internal representation of a View. type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector + view *View // view is the canonicalized View definition associated with this view. + subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor } func newViewInternal(v *View) (*viewInternal, error) { return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), }, nil } @@ -127,12 +153,12 @@ func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } -func (v *viewInternal) addSample(m *tag.Map, val float64) { +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val) + v.collector.addSample(sig, val, attachments, t) } // A Data is a set of rows about usage of the single measure associated @@ -172,11 +198,23 @@ func (r *Row) Equal(other *Row) bool { return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) } +const maxNameLength = 255 + +// Returns true if the given string contains only printable characters. +func isPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} + func checkViewName(name string) error { - if len(name) > internal.MaxNameLength { - return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength) + if len(name) > maxNameLength { + return fmt.Errorf("view name cannot be larger than %v", maxNameLength) } - if !internal.IsPrintable(name) { + if !isPrintable(name) { return fmt.Errorf("view name needs to be an ASCII string") } return nil diff --git a/vendor/go.opencensus.io/stats/view/view_to_metric.go b/vendor/go.opencensus.io/stats/view/view_to_metric.go new file mode 100644 index 0000000000..284299fafa --- /dev/null +++ b/vendor/go.opencensus.io/stats/view/view_to_metric.go @@ -0,0 +1,131 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package view + +import ( + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation type") + } +} + +func getLableKeys(v *View) []string { + labelKeys := []string{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, k.Name()) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: getUnit(v.Measure.Unit()), + Type: getType(v), + LabelKeys: getLableKeys(v), + } +} + +func toLabelValues(row *Row) []metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + for _, tag := range row.Tags { + labelValues = append(labelValues, metricdata.NewLabelValue(tag.Value)) + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row), + StartTime: startTime, + } +} + +func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } + + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now, startTime)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + } + return m +} diff --git a/vendor/go.opencensus.io/stats/view/worker.go b/vendor/go.opencensus.io/stats/view/worker.go index ce2f86ab61..37279b39e9 100644 --- a/vendor/go.opencensus.io/stats/view/worker.go +++ b/vendor/go.opencensus.io/stats/view/worker.go @@ -17,8 +17,11 @@ package view import ( "fmt" + "sync" "time" + "go.opencensus.io/metric/metricdata" + 
"go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -43,6 +46,7 @@ type worker struct { timer *time.Ticker c chan command quit, done chan bool + mu sync.RWMutex } var defaultWorker *worker @@ -64,11 +68,6 @@ func Find(name string) (v *View) { // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { - for _, v := range views { - if err := v.canonicalize(); err != nil { - return err - } - } req := ®isterViewReq{ views: views, err: make(chan error), @@ -107,17 +106,23 @@ func RetrieveData(viewName string) ([]*Row, error) { return resp.rows, resp.err } -func record(tags *tag.Map, ms interface{}) { +func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { req := &recordReq{ - tm: tags, - ms: ms.([]stats.Measurement), + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), } defaultWorker.c <- req } // SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or -// equal to zero, it enables the default behavior. +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. func SetReportingPeriod(d time.Duration) { // TODO(acetechnologist): ensure that the duration d is more than a certain // value. e.g. 1s @@ -142,6 +147,9 @@ func newWorker() *worker { } func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + for { select { case cmd := <-w.c: @@ -158,6 +166,9 @@ func (w *worker) start() { } func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + w.quit <- true <-w.done } @@ -175,6 +186,8 @@ func (w *worker) getMeasureRef(name string) *measureRef { } func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() vi, err := newViewInternal(v) if err != nil { return nil, err @@ -194,26 +207,73 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return vi, nil } +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + func (w *worker) reportUsage(now time.Time) { for _, v := range w.views { - if !v.isSubscribed() { - continue - } - rows := v.collectedRows() - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - viewData := &Data{ - View: v.view, - Start: w.startTimes[v], - End: time.Now(), - Rows: rows, - } - exportersMu.Lock() - for e := range exporters { - e.ExportView(viewData) + w.reportView(v, now) + } +} + +func (w *worker) toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if 
v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. +func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) } - exportersMu.Unlock() } + return metrics } diff --git a/vendor/go.opencensus.io/stats/view/worker_commands.go b/vendor/go.opencensus.io/stats/view/worker_commands.go index d0dd00ce76..ba6203a50b 100644 --- a/vendor/go.opencensus.io/stats/view/worker_commands.go +++ b/vendor/go.opencensus.io/stats/view/worker_commands.go @@ -56,6 +56,12 @@ type registerViewReq struct { } func (cmd *registerViewReq) handleCommand(w *worker) { + for _, v := range cmd.views { + if err := v.canonicalize(); err != nil { + cmd.err <- err + return + } + } var errstr []string for _, view := range cmd.views { vi, err := w.tryRegisterView(view) @@ -88,13 +94,16 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { continue } + // Report pending data for this view before removing it. + w.reportView(vi, time.Now()) + vi.unsubscribe() if !vi.isSubscribed() { // this was the last subscription and view is not collecting anymore. // The collected data can be cleared. vi.clearRows() } - delete(w.views, name) + w.unregisterView(name) } cmd.done <- struct{}{} } @@ -137,8 +146,10 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) { // recordReq is the command to record data related to multiple measures // at once. type recordReq struct { - tm *tag.Map - ms []stats.Measurement + tm *tag.Map + ms []stats.Measurement + attachments map[string]interface{} + t time.Time } func (cmd *recordReq) handleCommand(w *worker) { @@ -148,7 +159,7 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value()) + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) } } } diff --git a/vendor/go.opencensus.io/tag/context.go b/vendor/go.opencensus.io/tag/context.go index ed528bcb3c..b27d1b26b1 100644 --- a/vendor/go.opencensus.io/tag/context.go +++ b/vendor/go.opencensus.io/tag/context.go @@ -15,7 +15,9 @@ package tag -import "context" +import ( + "context" +) // FromContext returns the tag map stored in the context. func FromContext(ctx context.Context) *Map { diff --git a/vendor/go.opencensus.io/tag/map_codec.go b/vendor/go.opencensus.io/tag/map_codec.go index 3e998950c3..e88e727775 100644 --- a/vendor/go.opencensus.io/tag/map_codec.go +++ b/vendor/go.opencensus.io/tag/map_codec.go @@ -162,6 +162,9 @@ func (eg *encoderGRPC) bytes() []byte { // Encode encodes the tag map into a []byte. It is useful to propagate // the tag maps on wire in binary format. 
func Encode(m *Map) []byte { + if m == nil { + return nil + } eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } diff --git a/vendor/go.opencensus.io/trace/basetypes.go b/vendor/go.opencensus.io/trace/basetypes.go index 01f0f90831..0c54492a2b 100644 --- a/vendor/go.opencensus.io/trace/basetypes.go +++ b/vendor/go.opencensus.io/trace/basetypes.go @@ -59,6 +59,11 @@ func Int64Attribute(key string, value int64) Attribute { return Attribute{key: key, value: value} } +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + // StringAttribute returns a string-valued attribute. func StringAttribute(key string, value string) Attribute { return Attribute{key: key, value: value} @@ -71,8 +76,8 @@ type LinkType int32 // LinkType values. const ( LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. - LinkTypeChild // The current span is a child of the linked span. - LinkTypeParent // The current span is the parent of the linked span. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. ) // Link represents a reference from one span to another span. diff --git a/vendor/go.opencensus.io/trace/config.go b/vendor/go.opencensus.io/trace/config.go index d5473a798c..775f8274fa 100644 --- a/vendor/go.opencensus.io/trace/config.go +++ b/vendor/go.opencensus.io/trace/config.go @@ -14,7 +14,11 @@ package trace -import "go.opencensus.io/trace/internal" +import ( + "sync" + + "go.opencensus.io/trace/internal" +) // Config represents the global tracing configuration. type Config struct { @@ -23,12 +27,42 @@ type Config struct { // IDGenerator is for internal use only. IDGenerator internal.IDGenerator + + // MaxAnnotationEventsPerSpan is max number of annotation events per span + MaxAnnotationEventsPerSpan int + + // MaxMessageEventsPerSpan is max number of message events per span + MaxMessageEventsPerSpan int + + // MaxAnnotationEventsPerSpan is max number of attributes per span + MaxAttributesPerSpan int + + // MaxLinksPerSpan is max number of links per span + MaxLinksPerSpan int } +var configWriteMu sync.Mutex + +const ( + // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span + DefaultMaxAnnotationEventsPerSpan = 32 + + // DefaultMaxMessageEventsPerSpan is default max number of message events per span + DefaultMaxMessageEventsPerSpan = 128 + + // DefaultMaxAttributesPerSpan is default max number of attributes per span + DefaultMaxAttributesPerSpan = 32 + + // DefaultMaxLinksPerSpan is default max number of links per span + DefaultMaxLinksPerSpan = 32 +) + // ApplyConfig applies changes to the global tracing configuration. // // Fields not provided in the given config are going to be preserved. 
func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() c := *config.Load().(*Config) if cfg.DefaultSampler != nil { c.DefaultSampler = cfg.DefaultSampler @@ -36,5 +70,17 @@ func ApplyConfig(cfg Config) { if cfg.IDGenerator != nil { c.IDGenerator = cfg.IDGenerator } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } config.Store(&c) } diff --git a/vendor/go.opencensus.io/trace/doc.go b/vendor/go.opencensus.io/trace/doc.go index db00044b14..04b1ee4f38 100644 --- a/vendor/go.opencensus.io/trace/doc.go +++ b/vendor/go.opencensus.io/trace/doc.go @@ -32,6 +32,8 @@ to sample a subset of traces, or use AlwaysSample to collect a trace on every ru trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. Adding Spans to a Trace diff --git a/vendor/go.opencensus.io/trace/evictedqueue.go b/vendor/go.opencensus.io/trace/evictedqueue.go new file mode 100644 index 0000000000..ffc264f23d --- /dev/null +++ b/vendor/go.opencensus.io/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opencensus.io/trace/export.go b/vendor/go.opencensus.io/trace/export.go index c522550fa1..e0d9a4b99e 100644 --- a/vendor/go.opencensus.io/trace/export.go +++ b/vendor/go.opencensus.io/trace/export.go @@ -16,6 +16,7 @@ package trace import ( "sync" + "sync/atomic" "time" ) @@ -30,9 +31,11 @@ type Exporter interface { ExportSpan(s *SpanData) } +type exportersMap map[Exporter]struct{} + var ( - exportersMu sync.Mutex - exporters map[Exporter]struct{} + exporterMu sync.Mutex + exporters atomic.Value ) // RegisterExporter adds to the list of Exporters that will receive sampled @@ -40,20 +43,31 @@ var ( // // Binaries can register exporters, libraries shouldn't register exporters. 
func RegisterExporter(e Exporter) { - exportersMu.Lock() - if exporters == nil { - exporters = make(map[Exporter]struct{}) + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } } - exporters[e] = struct{}{} - exportersMu.Unlock() + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() } // UnregisterExporter removes from the list of Exporters the Exporter that was // registered with the given name. func UnregisterExporter(e Exporter) { - exportersMu.Lock() - delete(exporters, e) - exportersMu.Unlock() + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() } // SpanData contains all the information collected by a Span. @@ -71,6 +85,13 @@ type SpanData struct { Annotations []Annotation MessageEvents []MessageEvent Status - Links []Link - HasRemoteParent bool + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. + ChildSpanCount int } diff --git a/vendor/go.opencensus.io/trace/internal/internal.go b/vendor/go.opencensus.io/trace/internal/internal.go index 1c8b9b34b2..7e808d8f30 100644 --- a/vendor/go.opencensus.io/trace/internal/internal.go +++ b/vendor/go.opencensus.io/trace/internal/internal.go @@ -15,6 +15,7 @@ // Package internal provides trace internals. package internal +// IDGenerator allows custom generators for TraceId and SpanId. type IDGenerator interface { NewTraceID() [16]byte NewSpanID() [8]byte diff --git a/vendor/go.opencensus.io/trace/lrumap.go b/vendor/go.opencensus.io/trace/lrumap.go new file mode 100644 index 0000000000..3f80a33681 --- /dev/null +++ b/vendor/go.opencensus.io/trace/lrumap.go @@ -0,0 +1,37 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/hashicorp/golang-lru/simplelru" +) + +type lruMap struct { + simpleLruMap *simplelru.LRU + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{} + lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + return lm +} + +func (lm *lruMap) add(key, value interface{}) { + evicted := lm.simpleLruMap.Add(key, value) + if evicted { + lm.droppedCount++ + } +} diff --git a/vendor/go.opencensus.io/trace/sampling.go b/vendor/go.opencensus.io/trace/sampling.go index 313f8b68e2..71c10f9e3b 100644 --- a/vendor/go.opencensus.io/trace/sampling.go +++ b/vendor/go.opencensus.io/trace/sampling.go @@ -20,10 +20,6 @@ import ( const defaultSamplingProbability = 1e-4 -func newDefaultSampler() Sampler { - return ProbabilitySampler(defaultSamplingProbability) -} - // Sampler decides whether a trace should be sampled and exported. 
type Sampler func(SamplingParameters) SamplingDecision @@ -62,6 +58,9 @@ func ProbabilitySampler(fraction float64) Sampler { } // AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. func AlwaysSample() Sampler { return func(p SamplingParameters) SamplingDecision { return SamplingDecision{Sample: true} diff --git a/vendor/go.opencensus.io/trace/trace.go b/vendor/go.opencensus.io/trace/trace.go index fc75b72f00..38ead7bf0a 100644 --- a/vendor/go.opencensus.io/trace/trace.go +++ b/vendor/go.opencensus.io/trace/trace.go @@ -25,6 +25,7 @@ import ( "time" "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" ) // Span represents a span of a trace. It has an associated SpanContext, and @@ -41,6 +42,20 @@ type Span struct { data *SpanData mu sync.Mutex // protects the contents of *data (but not the pointer value.) spanContext SpanContext + + // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. *spanStore endOnce sync.Once @@ -88,6 +103,7 @@ type SpanContext struct { TraceID TraceID SpanID SpanID TraceOptions TraceOptions + Tracestate *tracestate.Tracestate } type contextKey struct{} @@ -98,13 +114,6 @@ func FromContext(ctx context.Context) *Span { return s } -// WithSpan returns a new context with the given Span attached. -// -// Deprecated: Use NewContext. -func WithSpan(parent context.Context, s *Span) context.Context { - return NewContext(parent, s) -} - // NewContext returns a new context with the given Span attached. func NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) @@ -154,10 +163,14 @@ func WithSampler(sampler Sampler) StartOption { // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext if p := FromContext(ctx); p != nil { + p.addChild() parent = p.spanContext } for _, op := range o { @@ -174,6 +187,9 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont // // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is // preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { var opts StartOptions for _, op := range o { @@ -185,26 +201,6 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont return NewContext(ctx, span), span } -// NewSpan returns a new span. 
-// -// If parent is not nil, created span will be a child of the parent. -// -// Deprecated: Use StartSpan. -func NewSpan(name string, parent *Span, o StartOptions) *Span { - var parentSpanContext SpanContext - if parent != nil { - parentSpanContext = parent.SpanContext() - } - return startSpanInternal(name, parent != nil, parentSpanContext, false, o) -} - -// NewSpanWithRemoteParent returns a new span with the given parent SpanContext. -// -// Deprecated: Use StartSpanWithRemoteParent. -func NewSpanWithRemoteParent(name string, parent SpanContext, o StartOptions) *Span { - return startSpanInternal(name, true, parent, true, o) -} - func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { span := &Span{} span.spanContext = parent @@ -245,6 +241,11 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa Name: name, HasRemoteParent: remoteParent, } + span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + span.links = newEvictedQueue(cfg.MaxLinksPerSpan) + if hasParent { span.data.ParentSpanID = parent.SpanID } @@ -262,26 +263,29 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa // End ends the span. func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } if !s.IsRecordingEvents() { return } s.endOnce.Do(func() { - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } - // TODO: optimize to avoid this call if sd won't be used. - sd := s.makeSpanData() - sd.EndTime = internal.MonotonicEndTime(sd.StartTime) - if s.spanStore != nil { - s.spanStore.finished(s, sd) - } - if s.spanContext.IsSampled() { - // TODO: consider holding exportersMu for less time. 
- exportersMu.Lock() - for e := range exporters { - e.ExportSpan(sd) + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } } - exportersMu.Unlock() } }) } @@ -292,11 +296,21 @@ func (s *Span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data - if s.data.Attributes != nil { - sd.Attributes = make(map[string]interface{}) - for k, v := range s.data.Attributes { - sd.Attributes[k] = v - } + if s.lruAttributes.simpleLruMap.Len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount + } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount } s.mu.Unlock() return &sd @@ -330,6 +344,57 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } +func (s *Span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + +func (s *Span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + +func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}) + for _, key := range s.lruAttributes.simpleLruMap.Keys() { + value, ok := s.lruAttributes.simpleLruMap.Get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *Span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + +func (s *Span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. 
@@ -338,10 +403,7 @@ func (s *Span) AddAttributes(attributes ...Attribute) { return } s.mu.Lock() - if s.data.Attributes == nil { - s.data.Attributes = make(map[string]interface{}) - } - copyAttributes(s.data.Attributes, attributes) + s.copyToCappedAttributes(attributes) s.mu.Unlock() } @@ -361,7 +423,7 @@ func (s *Span) lazyPrintfInternal(attributes []Attribute, format string, a ...in m = make(map[string]interface{}) copyAttributes(m, attributes) } - s.data.Annotations = append(s.data.Annotations, Annotation{ + s.annotations.add(Annotation{ Time: now, Message: msg, Attributes: m, @@ -377,7 +439,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) { a = make(map[string]interface{}) copyAttributes(a, attributes) } - s.data.Annotations = append(s.data.Annotations, Annotation{ + s.annotations.add(Annotation{ Time: now, Message: str, Attributes: a, @@ -414,7 +476,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy } now := time.Now() s.mu.Lock() - s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + s.messageEvents.add(MessageEvent{ Time: now, EventType: MessageEventTypeSent, MessageID: messageID, @@ -436,7 +498,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, compresse } now := time.Now() s.mu.Lock() - s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + s.messageEvents.add(MessageEvent{ Time: now, EventType: MessageEventTypeRecv, MessageID: messageID, @@ -452,7 +514,7 @@ func (s *Span) AddLink(l Link) { return } s.mu.Lock() - s.data.Links = append(s.data.Links, l) + s.links.add(l) s.mu.Unlock() } @@ -484,29 +546,39 @@ func init() { gen.spanIDInc |= 1 config.Store(&Config{ - DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, }) } type defaultIDGenerator struct { sync.Mutex - traceIDRand *rand.Rand + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. + // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + traceIDAdd [2]uint64 - nextSpanID uint64 - spanIDInc uint64 + traceIDRand *rand.Rand } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -// mu should be held while this function is called. 
func (gen *defaultIDGenerator) NewSpanID() [8]byte { - gen.Lock() - id := gen.nextSpanID - gen.nextSpanID += gen.spanIDInc - if gen.nextSpanID == 0 { - gen.nextSpanID += gen.spanIDInc + var id uint64 + for id == 0 { + id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) } - gen.Unlock() var sid [8]byte binary.LittleEndian.PutUint64(sid[:], id) return sid diff --git a/vendor/go.opencensus.io/trace/tracestate/tracestate.go b/vendor/go.opencensus.io/trace/tracestate/tracestate.go new file mode 100644 index 0000000000..2d6c713eb3 --- /dev/null +++ b/vendor/go.opencensus.io/trace/tracestate/tracestate.go @@ -0,0 +1,147 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. +package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors propagate additional information and inter-operate with their legacy Id formats. +type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in a list of key-value pair of Tracestate. +type Entry struct { + // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. + Key string + + // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the + // range 0x20 to 0x7E) except comma , and =. + Value string +} + +// Entries returns a slice of Entry. +func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) + return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) 
+ return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pair). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entry in entries then the entry copied +// from the parent is removed. See add func. +// +// An error is returned with nil Tracestate if +// 1. one or more entry in entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. +// (duplicate entry is counted only once). +func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +}
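
A minimal usage sketch of the new `trace/tracestate` package vendored above. It relies only on identifiers visible in this diff (`New`, `Entry`, `Entries`); the example key/value strings are arbitrary placeholders chosen to satisfy the documented W3C grammar.

```go
package main

import (
	"fmt"

	"go.opencensus.io/trace/tracestate"
)

func main() {
	// Keys must be lowercase and match the W3C tracestate key grammar; values
	// are printable ASCII without "," or "=".
	entry := tracestate.Entry{Key: "congo", Value: "t61rcWkgMzE"}

	// New validates entries, rejects duplicate keys, and caps the combined
	// list (parent + new entries) at 32 key-value pairs.
	ts, err := tracestate.New(nil, entry)
	if err != nil {
		panic(err)
	}

	for _, e := range ts.Entries() {
		fmt.Printf("%s=%s\n", e.Key, e.Value)
	}
}
```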
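
The trace changes above add per-span caps (attributes via an LRU map, annotations/message events/links via FIFO eviction) configurable through the new `Config` fields, plus `Float64Attribute`. A sketch of how those knobs are typically exercised, using only APIs shown in this diff (`ApplyConfig`, `StartSpan`, `WithSampler`, `AlwaysSample`, `AddAttributes`); the attribute names are illustrative.

```go
package main

import (
	"context"

	"go.opencensus.io/trace"
)

func main() {
	// Zero-valued fields are preserved, so only the attribute cap changes here;
	// the other limits keep their defaults (32 annotations, 128 message events,
	// 32 attributes, 32 links).
	trace.ApplyConfig(trace.Config{
		MaxAttributesPerSpan: 64,
	})

	_, span := trace.StartSpan(context.Background(), "example/work",
		trace.WithSampler(trace.AlwaysSample())) // avoid in high-traffic production services
	defer span.End()

	// Attributes beyond MaxAttributesPerSpan evict the oldest entries; the
	// evictions are surfaced as SpanData.DroppedAttributeCount.
	span.AddAttributes(
		trace.StringAttribute("endpoint", "/healthz"),
		trace.Float64Attribute("cache_hit_ratio", 0.93),
	)
}
```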
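
The rewritten `stats/view` package comment, the bucket handling added to `canonicalize`, and the `SetReportingPeriod` note above describe the view registration flow. A sketch under those assumptions; `view.Register`, `view.Unregister`, and `view.SetReportingPeriod` appear in this diff, while `stats.Float64`, `view.Distribution`, and `stats.Record` are assumed from the upstream stats packages.

```go
package main

import (
	"context"
	"log"
	"time"

	"go.opencensus.io/stats"
	"go.opencensus.io/stats/view"
)

var latencyMs = stats.Float64("example.com/measures/latency", "request latency", "ms")

func main() {
	v := &view.View{
		Name:        "example.com/views/latency",
		Description: "distribution of request latencies",
		Measure:     latencyMs,
		// A leading 0 bucket bound is dropped silently and negative bounds are
		// rejected when the view is canonicalized during Register.
		Aggregation: view.Distribution(25, 100, 200, 400, 800),
	}
	if err := view.Register(v); err != nil {
		log.Fatalf("failed to register view: %v", err)
	}
	defer view.Unregister(v) // pending data for the view is reported before removal

	// Exporters differ in the lowest interval they support (Stackdriver, for
	// example, recommends no less than one minute).
	view.SetReportingPeriod(10 * time.Second)

	stats.Record(context.Background(), latencyMs.M(37.5))
}
```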